diff --git a/.gitignore b/.gitignore
index c49e6c006c..c56a658ce2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ tests/core/pyspec/eth2spec/altair/
tests/core/pyspec/eth2spec/bellatrix/
tests/core/pyspec/eth2spec/capella/
tests/core/pyspec/eth2spec/deneb/
+tests/core/pyspec/eth2spec/eip6110/
# coverage reports
.htmlcov
diff --git a/Makefile b/Makefile
index d4259b2fe9..1ec399e3a8 100644
--- a/Makefile
+++ b/Makefile
@@ -23,16 +23,18 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
# To check generator matching:
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
-MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \
- $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \
- $(wildcard $(SPEC_DIR)/bellatrix/*.md) \
- $(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \
- $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \
- $(wildcard $(SPEC_DIR)/_features/custody/*.md) \
- $(wildcard $(SPEC_DIR)/_features/das/*.md) \
- $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \
+MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
+ $(wildcard $(SPEC_DIR)/*/*/*.md) \
+ $(wildcard $(SPEC_DIR)/_features/*/*.md) \
+ $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
$(wildcard $(SSZ_DIR)/*.md)
+ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110
+# The parameters for commands. Use `foreach` to avoid listing specs again.
+COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
+PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
+MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S)
+
COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html
@@ -63,15 +65,14 @@ partial_clean:
rm -f .coverage
rm -rf $(PY_SPEC_DIR)/.pytest_cache
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
- rm -rf $(ETH2SPEC_MODULE_DIR)/phase0
- rm -rf $(ETH2SPEC_MODULE_DIR)/altair
- rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix
- rm -rf $(ETH2SPEC_MODULE_DIR)/capella
- rm -rf $(ETH2SPEC_MODULE_DIR)/deneb
rm -rf $(COV_HTML_OUT_DIR)
rm -rf $(TEST_REPORT_DIR)
rm -rf eth2spec.egg-info dist build
- rm -rf build
+ rm -rf build;
+ @for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \
+ echo $$spec_name; \
+ rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \
+ done
clean: partial_clean
rm -rf venv
@@ -105,21 +106,21 @@ install_test:
# Testing against `minimal` or `mainnet` config by default
test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+ python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
# Testing against `minimal` or `mainnet` config by default
find_test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+ python3 -m pytest -k=$(K) --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
citest: pyspec
mkdir -p $(TEST_REPORT_DIR);
ifdef fork
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
+ python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
+ python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
endif
@@ -137,13 +138,11 @@ check_toc: $(MARKDOWN_FILES:=.toc)
codespell:
codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist
-# TODO: add future protocol upgrade patch packages to linting.
-# NOTE: we use `pylint` just for catching unused arguments in spec code
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
- && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \
- && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb
+ && pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
+ && mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)
lint_generators: pyspec
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
diff --git a/README.md b/README.md
index 49e1c3a4d9..61600d3890 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@ Features are researched and developed in parallel, and then consolidated into se
| Sharding (outdated) |
- Core
- [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
- Additions
- [P2P networking](specs/_features/sharding/p2p-interface.md)
|
| Custody Game (outdated) | - Core
- [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
- Additions
- [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding |
| Data Availability Sampling (outdated) | - Core
- [Core types and functions](specs/_features/das/das-core.md)
- [Fork choice changes](specs/_features/das/fork-choice.md)
- Additions
- [P2P Networking](specs/_features/das/p2p-interface.md)
- [Sampling process](specs/_features/das/sampling.md)
| - Dependent on sharding
- [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
|
+| EIP-6110 | - Core
- [Beacon Chain changes](specs/_features/eip6110/beacon-chain.md)
- [EIP-6110 fork](specs/_features/eip6110/fork.md)
- Additions
- [Honest validator guide changes](specs/_features/eip6110/validator.md)
|
### Accompanying documents can be found in [specs](specs) and include:
diff --git a/SECURITY.md b/SECURITY.md
index e46fab4de1..2101ea1554 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -8,4 +8,4 @@ Please see [Releases](https://github.com/ethereum/consensus-specs/releases/). We
**Please do not file a public ticket** mentioning the vulnerability.
-To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://eth2bounty.ethereum.org](https://eth2bounty.ethereum.org) or email eth2bounty@ethereum.org. Please read the [disclosure page](https://eth2bounty.ethereum.org) for more information about publicly disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://ethereum.org/bug-bounty](https://ethereum.org/bug-bounty) or email bounty@ethereum.org. Please read the [disclosure page](https://ethereum.org/bug-bounty) for more information about publicly disclosed security vulnerabilities.
diff --git a/presets/mainnet/phase0.yaml b/presets/mainnet/phase0.yaml
index 89bb97d6a8..02bc96c8cd 100644
--- a/presets/mainnet/phase0.yaml
+++ b/presets/mainnet/phase0.yaml
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
-# Fork Choice
-# ---------------------------------------------------------------
-# 2**3 (= 8)
-SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8
-
-
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
diff --git a/presets/minimal/phase0.yaml b/presets/minimal/phase0.yaml
index 2c6fbb3691..e7028f5a42 100644
--- a/presets/minimal/phase0.yaml
+++ b/presets/minimal/phase0.yaml
@@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1
HYSTERESIS_UPWARD_MULTIPLIER: 5
-# Fork Choice
-# ---------------------------------------------------------------
-# 2**1 (= 1)
-SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
-
-
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
diff --git a/setup.py b/setup.py
index 9c5488f126..52bad2b71b 100644
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,7 @@ def installPackage(package: str):
BELLATRIX = 'bellatrix'
CAPELLA = 'capella'
DENEB = 'deneb'
+EIP6110 = 'eip6110'
# The helper functions that are used when defining constants
@@ -667,9 +668,22 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
+#
+# EIP6110SpecBuilder
+#
+class EIP6110SpecBuilder(CapellaSpecBuilder):
+ fork: str = EIP6110
+
+ @classmethod
+ def imports(cls, preset_name: str):
+ return super().imports(preset_name) + f'''
+from eth2spec.capella import {preset_name} as capella
+'''
+
+
spec_builders = {
builder.fork: builder
- for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder)
+ for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder)
}
@@ -968,14 +982,14 @@ def finalize_options(self):
if len(self.md_doc_paths) == 0:
print("no paths were specified, using default markdown file paths for pyspec"
" build (spec fork: %s)" % self.spec_fork)
- if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB):
+ if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase0/weak-subjectivity.md
"""
- if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB):
+ if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110):
self.md_doc_paths += """
specs/altair/light-client/full-node.md
specs/altair/light-client/light-client.md
@@ -987,7 +1001,7 @@ def finalize_options(self):
specs/altair/validator.md
specs/altair/p2p-interface.md
"""
- if self.spec_fork in (BELLATRIX, CAPELLA, DENEB):
+ if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110):
self.md_doc_paths += """
specs/bellatrix/beacon-chain.md
specs/bellatrix/fork.md
@@ -996,7 +1010,7 @@ def finalize_options(self):
specs/bellatrix/p2p-interface.md
sync/optimistic.md
"""
- if self.spec_fork in (CAPELLA, DENEB):
+ if self.spec_fork in (CAPELLA, DENEB, EIP6110):
self.md_doc_paths += """
specs/capella/light-client/fork.md
specs/capella/light-client/full-node.md
@@ -1021,6 +1035,11 @@ def finalize_options(self):
specs/deneb/p2p-interface.md
specs/deneb/validator.md
"""
+ if self.spec_fork == EIP6110:
+ self.md_doc_paths += """
+ specs/_features/eip6110/beacon-chain.md
+ specs/_features/eip6110/fork.md
+ """
if len(self.md_doc_paths) == 0:
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
@@ -1174,5 +1193,6 @@ def run(self):
RUAMEL_YAML_VERSION,
"lru-dict==1.1.8",
MARKO_VERSION,
+ "py_arkworks_bls12381==0.3.4",
]
)
diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md
new file mode 100644
index 0000000000..70a72a5f45
--- /dev/null
+++ b/specs/_features/eip6110/beacon-chain.md
@@ -0,0 +1,324 @@
+# EIP-6110 -- The Beacon Chain
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+ - [Misc](#misc)
+- [Preset](#preset)
+ - [Execution](#execution)
+- [Containers](#containers)
+ - [New containers](#new-containers)
+ - [`DepositReceipt`](#depositreceipt)
+ - [Extended Containers](#extended-containers)
+ - [`ExecutionPayload`](#executionpayload)
+ - [`ExecutionPayloadHeader`](#executionpayloadheader)
+ - [`BeaconState`](#beaconstate)
+- [Beacon chain state transition function](#beacon-chain-state-transition-function)
+ - [Block processing](#block-processing)
+ - [Modified `process_operations`](#modified-process_operations)
+ - [New `process_deposit_receipt`](#new-process_deposit_receipt)
+ - [Modified `process_execution_payload`](#modified-process_execution_payload)
+- [Testing](#testing)
+
+
+
+
+## Introduction
+
+This is the beacon chain specification of in-protocol deposits processing mechanism.
+This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110).
+
+*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
+
+## Constants
+
+The following values are (non-configurable) constants used throughout the specification.
+
+### Misc
+
+| Name | Value |
+| - | - |
+| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` |
+
+## Preset
+
+### Execution
+
+| Name | Value | Description |
+| - | - | - |
+| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | Maximum number of deposit receipts allowed in each payload |
+
+## Containers
+
+### New containers
+
+#### `DepositReceipt`
+
+```python
+class DepositReceipt(Container):
+ pubkey: BLSPubkey
+ withdrawal_credentials: Bytes32
+ amount: Gwei
+ signature: BLSSignature
+ index: uint64
+```
+
+### Extended Containers
+
+#### `ExecutionPayload`
+
+```python
+class ExecutionPayload(Container):
+ # Execution block header fields
+ parent_hash: Hash32
+ fee_recipient: ExecutionAddress
+ state_root: Bytes32
+ receipts_root: Bytes32
+ logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
+ prev_randao: Bytes32
+ block_number: uint64
+ gas_limit: uint64
+ gas_used: uint64
+ timestamp: uint64
+ extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
+ base_fee_per_gas: uint256
+ # Extra payload fields
+ block_hash: Hash32
+ transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
+ withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
+ deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP-6110]
+```
+
+#### `ExecutionPayloadHeader`
+
+```python
+class ExecutionPayloadHeader(Container):
+ # Execution block header fields
+ parent_hash: Hash32
+ fee_recipient: ExecutionAddress
+ state_root: Bytes32
+ receipts_root: Bytes32
+ logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
+ prev_randao: Bytes32
+ block_number: uint64
+ gas_limit: uint64
+ gas_used: uint64
+ timestamp: uint64
+ extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
+ base_fee_per_gas: uint256
+ # Extra payload fields
+ block_hash: Hash32
+ transactions_root: Root
+ withdrawals_root: Root
+ deposit_receipts_root: Root # [New in EIP-6110]
+```
+
+#### `BeaconState`
+
+```python
+class BeaconState(Container):
+ # Versioning
+ genesis_time: uint64
+ genesis_validators_root: Root
+ slot: Slot
+ fork: Fork
+ # History
+ latest_block_header: BeaconBlockHeader
+ block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+ state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+ historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
+ # Eth1
+ eth1_data: Eth1Data
+ eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
+ eth1_deposit_index: uint64
+ # Registry
+ validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+ balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
+ # Randomness
+ randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
+ # Slashings
+ slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
+ # Participation
+ previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+ current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+ # Finality
+ justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
+ previous_justified_checkpoint: Checkpoint
+ current_justified_checkpoint: Checkpoint
+ finalized_checkpoint: Checkpoint
+ # Inactivity
+ inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+ # Sync
+ current_sync_committee: SyncCommittee
+ next_sync_committee: SyncCommittee
+ # Execution
+ latest_execution_payload_header: ExecutionPayloadHeader
+ # Withdrawals
+ next_withdrawal_index: WithdrawalIndex
+ next_withdrawal_validator_index: ValidatorIndex
+ # Deep history valid from Capella onwards
+ historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
+ # [New in EIP-6110]
+ deposit_receipts_start_index: uint64
+```
+
+## Beacon chain state transition function
+
+### Block processing
+
+```python
+def process_block(state: BeaconState, block: BeaconBlock) -> None:
+ process_block_header(state, block)
+ if is_execution_enabled(state, block.body):
+ process_withdrawals(state, block.body.execution_payload)
+ process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110]
+ process_randao(state, block.body)
+ process_eth1_data(state, block.body)
+ process_operations(state, block.body) # [Modified in EIP-6110]
+ process_sync_aggregate(state, block.body.sync_aggregate)
+```
+
+#### Modified `process_operations`
+
+*Note*: The function `process_operations` is modified to process `DepositReceipt` operations included in the payload.
+
+```python
+def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
+ # [Modified in EIP-6110]
+ # Disable former deposit mechanism once all prior deposits are processed
+ eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
+ if state.eth1_deposit_index < eth1_deposit_index_limit:
+ assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
+ else:
+ assert len(body.deposits) == 0
+
+ def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
+ for operation in operations:
+ fn(state, operation)
+
+ for_ops(body.proposer_slashings, process_proposer_slashing)
+ for_ops(body.attester_slashings, process_attester_slashing)
+ for_ops(body.attestations, process_attestation)
+ for_ops(body.deposits, process_deposit) # [Modified in EIP-6110]
+ for_ops(body.voluntary_exits, process_voluntary_exit)
+ for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
+
+ # [New in EIP-6110]
+ if is_execution_enabled(state, body):
+ for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt)
+```
+
+#### New `process_deposit_receipt`
+
+```python
+def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None:
+ # Set deposit receipt start index
+ if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX:
+ state.deposit_receipts_start_index = deposit_receipt.index
+
+ apply_deposit(
+ state=state,
+ pubkey=deposit_receipt.pubkey,
+ withdrawal_credentials=deposit_receipt.withdrawal_credentials,
+ amount=deposit_receipt.amount,
+ signature=deposit_receipt.signature,
+ )
+```
+
+#### Modified `process_execution_payload`
+
+*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type.
+
+```python
+def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
+ # Verify consistency of the parent hash with respect to the previous execution payload header
+ if is_merge_transition_complete(state):
+ assert payload.parent_hash == state.latest_execution_payload_header.block_hash
+ # Verify prev_randao
+ assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
+ # Verify timestamp
+ assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
+ # Verify the execution payload is valid
+ assert execution_engine.notify_new_payload(payload)
+ # Cache execution payload header
+ state.latest_execution_payload_header = ExecutionPayloadHeader(
+ parent_hash=payload.parent_hash,
+ fee_recipient=payload.fee_recipient,
+ state_root=payload.state_root,
+ receipts_root=payload.receipts_root,
+ logs_bloom=payload.logs_bloom,
+ prev_randao=payload.prev_randao,
+ block_number=payload.block_number,
+ gas_limit=payload.gas_limit,
+ gas_used=payload.gas_used,
+ timestamp=payload.timestamp,
+ extra_data=payload.extra_data,
+ base_fee_per_gas=payload.base_fee_per_gas,
+ block_hash=payload.block_hash,
+ transactions_root=hash_tree_root(payload.transactions),
+ withdrawals_root=hash_tree_root(payload.withdrawals),
+ deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP-6110]
+ )
+```
+
+## Testing
+
+*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only.
+Modifications include:
+1. Use `EIP6110_FORK_VERSION` as the previous and current fork version.
+2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`.
+3. Add `deposit_receipts_start_index` variable to the genesis state initialization.
+
+```python
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
+ eth1_timestamp: uint64,
+ deposits: Sequence[Deposit],
+ execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
+ ) -> BeaconState:
+ fork = Fork(
+ previous_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] for testing only
+ current_version=EIP6110_FORK_VERSION, # [Modified in EIP6110]
+ epoch=GENESIS_EPOCH,
+ )
+ state = BeaconState(
+ genesis_time=eth1_timestamp + GENESIS_DELAY,
+ fork=fork,
+ eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
+ latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
+ randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
+ deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110]
+ )
+
+ # Process deposits
+ leaves = list(map(lambda deposit: deposit.data, deposits))
+ for index, deposit in enumerate(deposits):
+ deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
+ state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
+ process_deposit(state, deposit)
+
+ # Process activations
+ for index, validator in enumerate(state.validators):
+ balance = state.balances[index]
+ validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+ if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
+ validator.activation_eligibility_epoch = GENESIS_EPOCH
+ validator.activation_epoch = GENESIS_EPOCH
+
+ # Set genesis validators root for domain separation and chain versioning
+ state.genesis_validators_root = hash_tree_root(state.validators)
+
+ # Fill in sync committees
+ # Note: A duplicate committee is assigned for the current and next committee at genesis
+ state.current_sync_committee = get_next_sync_committee(state)
+ state.next_sync_committee = get_next_sync_committee(state)
+
+ # Initialize the execution payload header
+ state.latest_execution_payload_header = execution_payload_header
+
+ return state
+```
diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md
new file mode 100644
index 0000000000..b08661e5fa
--- /dev/null
+++ b/specs/_features/eip6110/fork.md
@@ -0,0 +1,142 @@
+# EIP-6110 -- Fork Logic
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+- [Introduction](#introduction)
+- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [Modified `compute_fork_version`](#modified-compute_fork_version)
+- [Fork to EIP-6110](#fork-to-eip-6110)
+ - [Fork trigger](#fork-trigger)
+ - [Upgrading the state](#upgrading-the-state)
+
+
+
+## Introduction
+
+This document describes the process of the EIP-6110 upgrade.
+
+## Configuration
+
+Warning: this configuration is not definitive.
+
+| Name | Value |
+| - | - |
+| `EIP6110_FORK_VERSION` | `Version('0x05000000')` |
+| `EIP6110_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+
+## Helper functions
+
+### Misc
+
+#### Modified `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= EIP6110_FORK_EPOCH:
+        return EIP6110_FORK_VERSION
+ if epoch >= CAPELLA_FORK_EPOCH:
+ return CAPELLA_FORK_VERSION
+ if epoch >= BELLATRIX_FORK_EPOCH:
+ return BELLATRIX_FORK_VERSION
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
+
+## Fork to EIP-6110
+
+### Fork trigger
+
+TBD. This fork is defined for testing purposes; the EIP may be combined with another consensus-layer upgrade.
+For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`.
+
+Note that for the pure EIP-6110 networks, we don't apply `upgrade_to_eip6110` since it starts with EIP-6110 version logic.
+
+### Upgrading the state
+
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH`,
+an irregular state change is made to upgrade to EIP-6110.
+
+```python
+def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState:
+ epoch = capella.get_current_epoch(pre)
+ latest_execution_payload_header = ExecutionPayloadHeader(
+ parent_hash=pre.latest_execution_payload_header.parent_hash,
+ fee_recipient=pre.latest_execution_payload_header.fee_recipient,
+ state_root=pre.latest_execution_payload_header.state_root,
+ receipts_root=pre.latest_execution_payload_header.receipts_root,
+ logs_bloom=pre.latest_execution_payload_header.logs_bloom,
+ prev_randao=pre.latest_execution_payload_header.prev_randao,
+ block_number=pre.latest_execution_payload_header.block_number,
+ gas_limit=pre.latest_execution_payload_header.gas_limit,
+ gas_used=pre.latest_execution_payload_header.gas_used,
+ timestamp=pre.latest_execution_payload_header.timestamp,
+ extra_data=pre.latest_execution_payload_header.extra_data,
+ base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
+ block_hash=pre.latest_execution_payload_header.block_hash,
+ transactions_root=pre.latest_execution_payload_header.transactions_root,
+ withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
+ deposit_receipts_root=Root(), # [New in EIP-6110]
+ )
+ post = BeaconState(
+ # Versioning
+ genesis_time=pre.genesis_time,
+ genesis_validators_root=pre.genesis_validators_root,
+ slot=pre.slot,
+ fork=Fork(
+ previous_version=pre.fork.current_version,
+ current_version=EIP6110_FORK_VERSION, # [Modified in EIP-6110]
+ epoch=epoch,
+ ),
+ # History
+ latest_block_header=pre.latest_block_header,
+ block_roots=pre.block_roots,
+ state_roots=pre.state_roots,
+ historical_roots=pre.historical_roots,
+ # Eth1
+ eth1_data=pre.eth1_data,
+ eth1_data_votes=pre.eth1_data_votes,
+ eth1_deposit_index=pre.eth1_deposit_index,
+ # Registry
+ validators=pre.validators,
+ balances=pre.balances,
+ # Randomness
+ randao_mixes=pre.randao_mixes,
+ # Slashings
+ slashings=pre.slashings,
+ # Participation
+ previous_epoch_participation=pre.previous_epoch_participation,
+ current_epoch_participation=pre.current_epoch_participation,
+ # Finality
+ justification_bits=pre.justification_bits,
+ previous_justified_checkpoint=pre.previous_justified_checkpoint,
+ current_justified_checkpoint=pre.current_justified_checkpoint,
+ finalized_checkpoint=pre.finalized_checkpoint,
+ # Inactivity
+ inactivity_scores=pre.inactivity_scores,
+ # Sync
+ current_sync_committee=pre.current_sync_committee,
+ next_sync_committee=pre.next_sync_committee,
+ # Execution-layer
+ latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP-6110]
+ # Withdrawals
+ next_withdrawal_index=pre.next_withdrawal_index,
+ next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
+ # Deep history valid from Capella onwards
+ historical_summaries=pre.historical_summaries,
+ # EIP-6110
+ deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110]
+ )
+
+ return post
+```
diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md
new file mode 100644
index 0000000000..ae9d493a6f
--- /dev/null
+++ b/specs/_features/eip6110/validator.md
@@ -0,0 +1,42 @@
+# EIP-6110 -- Honest Validator
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Prerequisites](#prerequisites)
+- [Block proposal](#block-proposal)
+ - [Deposits](#deposits)
+
+
+
+
+## Introduction
+
+This document represents the changes to be made in the code of an "honest validator" to implement EIP-6110.
+
+## Prerequisites
+
+This document is an extension of the [Capella -- Honest Validator](../../capella/validator.md) guide.
+All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
+
+All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout.
+Please see related Beacon Chain doc before continuing and use them as a reference throughout.
+
+## Block proposal
+
+### Deposits
+
+The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function:
+
+```python
+def get_eth1_deposit_count(state: BeaconState) -> uint64:
+ eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
+ if state.eth1_deposit_index < eth1_deposit_index_limit:
+ return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
+ else:
+ return uint64(0)
+```
diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md
index fe71a5ff83..58dfad608a 100644
--- a/specs/altair/beacon-chain.md
+++ b/specs/altair/beacon-chain.md
@@ -43,7 +43,7 @@
- [Modified `slash_validator`](#modified-slash_validator)
- [Block processing](#block-processing)
- [Modified `process_attestation`](#modified-process_attestation)
- - [Modified `process_deposit`](#modified-process_deposit)
+ - [Modified `apply_deposit`](#modified-apply_deposit)
- [Sync aggregate processing](#sync-aggregate-processing)
- [Epoch processing](#epoch-processing)
- [Justification and finalization](#justification-and-finalization)
@@ -489,39 +489,29 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
-#### Modified `process_deposit`
+#### Modified `apply_deposit`
-*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
+*Note*: The function `apply_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
```python
-def process_deposit(state: BeaconState, deposit: Deposit) -> None:
- # Verify the Merkle branch
- assert is_valid_merkle_branch(
- leaf=hash_tree_root(deposit.data),
- branch=deposit.proof,
- depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
- index=state.eth1_deposit_index,
- root=state.eth1_data.deposit_root,
- )
-
- # Deposits must be processed in order
- state.eth1_deposit_index += 1
-
- pubkey = deposit.data.pubkey
- amount = deposit.data.amount
+def apply_deposit(state: BeaconState,
+ pubkey: BLSPubkey,
+ withdrawal_credentials: Bytes32,
+ amount: uint64,
+ signature: BLSSignature) -> None:
validator_pubkeys = [validator.pubkey for validator in state.validators]
if pubkey not in validator_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
deposit_message = DepositMessage(
- pubkey=deposit.data.pubkey,
- withdrawal_credentials=deposit.data.withdrawal_credentials,
- amount=deposit.data.amount,
+ pubkey=pubkey,
+ withdrawal_credentials=withdrawal_credentials,
+ amount=amount,
)
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
signing_root = compute_signing_root(deposit_message, domain)
# Initialize validator if the deposit signature is valid
- if bls.Verify(pubkey, signing_root, deposit.data.signature):
- state.validators.append(get_validator_from_deposit(deposit))
+ if bls.Verify(pubkey, signing_root, signature):
+ state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
state.balances.append(amount)
# [New in Altair]
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md
index f1b012e981..baef684c62 100644
--- a/specs/altair/light-client/sync-protocol.md
+++ b/specs/altair/light-client/sync-protocol.md
@@ -387,7 +387,8 @@ def validate_light_client_update(store: LightClientStore,
pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
if bit
]
- fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
+ fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1)
+ fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot))
domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
signing_root = compute_signing_root(update.attested_header.beacon, domain)
assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md
index 94d0688273..ed7d60a932 100644
--- a/specs/bellatrix/fork-choice.md
+++ b/specs/bellatrix/fork-choice.md
@@ -174,6 +174,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
# Check the block is valid and compute the post-state
state = pre_state.copy()
+ block_root = hash_tree_root(block)
state_transition(state, signed_block, True)
# [New in Bellatrix]
@@ -181,9 +182,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
validate_merge_block(block)
# Add new block to the store
- store.blocks[hash_tree_root(block)] = block
+ store.blocks[block_root] = block
# Add new state for this block to the store
- store.block_states[hash_tree_root(block)] = state
+ store.block_states[block_root] = state
# Add proposer score boost if the block is timely
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -191,15 +192,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
if get_current_slot(store) == block.slot and is_before_attesting_interval:
store.proposer_boost_root = hash_tree_root(block)
- # Update justified checkpoint
- if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
- if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
- store.best_justified_checkpoint = state.current_justified_checkpoint
- if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
- store.justified_checkpoint = state.current_justified_checkpoint
-
- # Update finalized checkpoint
- if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
- store.finalized_checkpoint = state.finalized_checkpoint
- store.justified_checkpoint = state.current_justified_checkpoint
+ # Update checkpoints in store if necessary
+ update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
+
+ # Eagerly compute unrealized justification and finality.
+ compute_pulled_up_tip(store, block_root)
```
diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md
index e93eb54faf..61714cf1a8 100644
--- a/specs/deneb/fork-choice.md
+++ b/specs/deneb/fork-choice.md
@@ -47,7 +47,7 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
# `retrieve_blobs_and_proofs` is implementation and context dependent
# It returns all the blobs for the given block root, and raises an exception if not available
- # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
+ # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)
# For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
@@ -63,7 +63,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ
### `on_block`
-*Note*: The only modification is the addition of the verification of transition block conditions.
+*Note*: The only modification is the addition of the blob data availability check.
```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
@@ -91,6 +91,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
# Check the block is valid and compute the post-state
state = pre_state.copy()
+ block_root = hash_tree_root(block)
state_transition(state, signed_block, True)
# Check the merge transition
@@ -98,9 +99,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
validate_merge_block(block)
# Add new block to the store
- store.blocks[hash_tree_root(block)] = block
+ store.blocks[block_root] = block
# Add new state for this block to the store
- store.block_states[hash_tree_root(block)] = state
+ store.block_states[block_root] = state
# Add proposer score boost if the block is timely
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -108,15 +109,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
if get_current_slot(store) == block.slot and is_before_attesting_interval:
store.proposer_boost_root = hash_tree_root(block)
- # Update justified checkpoint
- if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
- if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
- store.best_justified_checkpoint = state.current_justified_checkpoint
- if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
- store.justified_checkpoint = state.current_justified_checkpoint
-
- # Update finalized checkpoint
- if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
- store.finalized_checkpoint = state.finalized_checkpoint
- store.justified_checkpoint = state.current_justified_checkpoint
+ # Update checkpoints in store if necessary
+ update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
+
+ # Eagerly compute unrealized justification and finality.
+ compute_pulled_up_tip(store, block_root)
```
diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md
index 1ace26c7f5..23b3f23c7b 100644
--- a/specs/deneb/fork.md
+++ b/specs/deneb/fork.md
@@ -64,8 +64,6 @@ Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since i
### Upgrading the state
-Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
-
```python
def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
epoch = capella.get_current_epoch(pre)
@@ -82,10 +80,10 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
timestamp=pre.latest_execution_payload_header.timestamp,
extra_data=pre.latest_execution_payload_header.extra_data,
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
- excess_data_gas=uint256(0), # [New in Deneb]
block_hash=pre.latest_execution_payload_header.block_hash,
transactions_root=pre.latest_execution_payload_header.transactions_root,
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
+ excess_data_gas=uint256(0), # [New in Deneb]
)
post = BeaconState(
# Versioning
diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md
index ea29eb7f4b..9be028620d 100644
--- a/specs/deneb/p2p-interface.md
+++ b/specs/deneb/p2p-interface.md
@@ -15,6 +15,8 @@ The specification of these changes continues in the same format as the network s
- [`BlobSidecar`](#blobsidecar)
- [`SignedBlobSidecar`](#signedblobsidecar)
- [`BlobIdentifier`](#blobidentifier)
+ - [Helpers](#helpers)
+ - [`verify_sidecar_signature`](#verify_sidecar_signature)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
@@ -38,7 +40,7 @@ The specification of these changes continues in the same format as the network s
| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
-| `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request |
+| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
## Containers
@@ -73,6 +75,17 @@ class BlobIdentifier(Container):
index: BlobIndex
```
+### Helpers
+
+#### `verify_sidecar_signature`
+
+```python
+def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
+ proposer = state.validators[signed_blob_sidecar.message.proposer_index]
+ signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR))
+ return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
+```
+
## The gossip domain: gossipsub
Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
@@ -108,11 +121,12 @@ This topic is used to propagate signed blob sidecars, one for each sidecar index
The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
-- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
+- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
-- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved).
-- _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation.
-- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey.
+- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
+- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation.
+- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`).
+- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature`.
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
@@ -183,7 +197,7 @@ Request Content:
```
(
- List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK]
+ List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS]
)
```
@@ -191,7 +205,7 @@ Response Content:
```
(
- List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK]
+ List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
@@ -202,7 +216,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar
The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.
-No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time.
+No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing).
@@ -239,7 +253,7 @@ Request Content:
Response Content:
```
(
- List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK]
+ List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
@@ -274,12 +288,16 @@ to be fully compliant with `BlobSidecarsByRange` requests.
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.
-Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars.
+Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars.
Clients MUST include all blob sidecars of each block from which they include blob sidecars.
The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order.
+Slots that do not contain known blobs MUST be skipped, mimicking the behaviour
+of the `BlocksByRange` request. Only response chunks with known blobs should
+therefore be sent.
+
Clients MAY limit the number of blob sidecars in the response.
The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars.
diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md
index 61e22e1820..c48857d9e8 100644
--- a/specs/deneb/polynomial-commitments.md
+++ b/specs/deneb/polynomial-commitments.md
@@ -65,14 +65,17 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point |
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form |
-| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data |
+| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic data blob |
## Constants
| Name | Value | Notes |
| - | - | - |
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
+| `BYTES_PER_COMMITMENT` | `uint64(48)` | The number of bytes in a KZG commitment |
+| `BYTES_PER_PROOF` | `uint64(48)` | The number of bytes in a KZG proof |
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
+| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob |
| `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
@@ -102,7 +105,7 @@ but reusing the `mainnet` settings in public networks is a critical security req
| `KZG_SETUP_G2_LENGTH` | `65` |
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
-| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
+| `KZG_SETUP_LAGRANGE` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
## Helper functions
@@ -273,7 +276,7 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen
BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
"""
assert len(points) == len(scalars)
- result = bls.Z1
+ result = bls.Z1()
for x, a in zip(points, scalars):
result = bls.add(result, bls.multiply(bls.bytes48_to_G1(x), a))
return KZGCommitment(bls.G1_to_bytes48(result))
@@ -303,8 +306,10 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
z: BLSFieldElement) -> BLSFieldElement:
"""
- Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
- Uses the barycentric formula:
+ Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``.
+ - When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the
+ position that ``z`` is in the domain.
+ - When ``z`` is not in the domain, the barycentric formula is used:
f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
"""
width = len(polynomial)
@@ -323,7 +328,7 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
result += int(div(a, b) % BLS_MODULUS)
- result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
+ result = result * int(BLS_MODULUS + pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
return BLSFieldElement(result % BLS_MODULUS)
```
@@ -338,6 +343,7 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
"""
Public method.
"""
+ assert len(blob) == BYTES_PER_BLOB
return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob))
```
@@ -345,17 +351,22 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
```python
def verify_kzg_proof(commitment_bytes: Bytes48,
- z: Bytes32,
- y: Bytes32,
+ z_bytes: Bytes32,
+ y_bytes: Bytes32,
proof_bytes: Bytes48) -> bool:
"""
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
Receives inputs as bytes.
Public method.
"""
+ assert len(commitment_bytes) == BYTES_PER_COMMITMENT
+ assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT
+ assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT
+ assert len(proof_bytes) == BYTES_PER_PROOF
+
return verify_kzg_proof_impl(bytes_to_kzg_commitment(commitment_bytes),
- bytes_to_bls_field(z),
- bytes_to_bls_field(y),
+ bytes_to_bls_field(z_bytes),
+ bytes_to_bls_field(y_bytes),
bytes_to_kzg_proof(proof_bytes))
```
@@ -371,10 +382,10 @@ def verify_kzg_proof_impl(commitment: KZGCommitment,
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
"""
# Verify: P - y = Q * (X - z)
- X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
- P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
+ X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2(), (BLS_MODULUS - z) % BLS_MODULUS))
+ P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS))
return bls.pairing_check([
- [P_minus_y, bls.neg(bls.G2)],
+ [P_minus_y, bls.neg(bls.G2())],
[bls.bytes48_to_G1(proof), X_minus_z]
])
```
@@ -415,28 +426,31 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
proofs,
[BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)],
)
- C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
+ C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS))
for commitment, y in zip(commitments, ys)]
C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)
return bls.pairing_check([
[bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))],
- [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2]
+ [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()]
])
```
#### `compute_kzg_proof`
```python
-def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof:
+def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]:
"""
Compute KZG proof at point `z` for the polynomial represented by `blob`.
Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z).
Public method.
"""
+ assert len(blob) == BYTES_PER_BLOB
+ assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT
polynomial = blob_to_polynomial(blob)
- return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z))
+ proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes))
+ return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
```
#### `compute_quotient_eval_within_domain`
@@ -470,7 +484,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement,
#### `compute_kzg_proof_impl`
```python
-def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
+def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[KZGProof, BLSFieldElement]:
"""
Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`.
"""
@@ -494,21 +508,25 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro
# Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z).
quotient_polynomial[i] = div(a, b)
- return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
+ return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)), y
```
#### `compute_blob_kzg_proof`
```python
-def compute_blob_kzg_proof(blob: Blob) -> KZGProof:
+def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof:
"""
Given a blob, return the KZG proof that is used to verify it against the commitment.
+ This method does not verify that the commitment is correct with respect to `blob`.
Public method.
"""
- commitment = blob_to_kzg_commitment(blob)
+ assert len(blob) == BYTES_PER_BLOB
+ assert len(commitment_bytes) == BYTES_PER_COMMITMENT
+ commitment = bytes_to_kzg_commitment(commitment_bytes)
polynomial = blob_to_polynomial(blob)
evaluation_challenge = compute_challenge(blob, commitment)
- return compute_kzg_proof_impl(polynomial, evaluation_challenge)
+ proof, _ = compute_kzg_proof_impl(polynomial, evaluation_challenge)
+ return proof
```
#### `verify_blob_kzg_proof`
@@ -522,6 +540,10 @@ def verify_blob_kzg_proof(blob: Blob,
Public method.
"""
+ assert len(blob) == BYTES_PER_BLOB
+ assert len(commitment_bytes) == BYTES_PER_COMMITMENT
+ assert len(proof_bytes) == BYTES_PER_PROOF
+
commitment = bytes_to_kzg_commitment(commitment_bytes)
polynomial = blob_to_polynomial(blob)
@@ -551,6 +573,9 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
commitments, evaluation_challenges, ys, proofs = [], [], [], []
for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes):
+ assert len(blob) == BYTES_PER_BLOB
+ assert len(commitment_bytes) == BYTES_PER_COMMITMENT
+ assert len(proof_bytes) == BYTES_PER_PROOF
commitment = bytes_to_kzg_commitment(commitment_bytes)
commitments.append(commitment)
polynomial = blob_to_polynomial(blob)
@@ -561,3 +586,4 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
```
+
diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md
index b29330ce57..b627de023e 100644
--- a/specs/deneb/validator.md
+++ b/specs/deneb/validator.md
@@ -44,7 +44,9 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload`
Implementers may also retrieve blobs individually per transaction.
```python
-def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
+def get_blobs_and_kzg_commitments(
+ payload_id: PayloadId
+) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]:
# pylint: disable=unused-argument
...
```
@@ -66,13 +68,14 @@ use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blo
```python
def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
blobs: Sequence[Blob],
- blob_kzg_commitments: Sequence[KZGCommitment]) -> None:
+ blob_kzg_commitments: Sequence[KZGCommitment],
+ blob_kzg_proofs: Sequence[KZGProof]) -> None:
# Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments)
# Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
- assert len(blob_kzg_commitments) == len(blobs)
- assert [blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)]
+ assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs)
+ assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs)
```
3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
@@ -87,7 +90,9 @@ Blobs associated with a block are packaged into sidecar objects for distribution
Each `sidecar` is obtained from:
```python
-def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]:
+def get_blob_sidecars(block: BeaconBlock,
+ blobs: Sequence[Blob],
+ blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
return [
BlobSidecar(
block_root=hash_tree_root(block),
@@ -96,7 +101,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo
block_parent_root=block.parent_root,
blob=blob,
kzg_commitment=block.body.blob_kzg_commitments[index],
- kzg_proof=compute_blob_kzg_proof(blob),
+ kzg_proof=blob_kzg_proofs[index],
)
for index, blob in enumerate(blobs)
]
@@ -118,7 +123,7 @@ def get_blob_sidecar_signature(state: BeaconState,
After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested.
-The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable,
+The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,
to ensure the data-availability of these blobs throughout the network.
-After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them.
+After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them.
diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md
index 7e14fa951a..3794cd6be3 100644
--- a/specs/phase0/beacon-chain.md
+++ b/specs/phase0/beacon-chain.md
@@ -1835,13 +1835,12 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
##### Deposits
```python
-def get_validator_from_deposit(deposit: Deposit) -> Validator:
- amount = deposit.data.amount
+def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
return Validator(
- pubkey=deposit.data.pubkey,
- withdrawal_credentials=deposit.data.withdrawal_credentials,
+ pubkey=pubkey,
+ withdrawal_credentials=withdrawal_credentials,
activation_eligibility_epoch=FAR_FUTURE_EPOCH,
activation_epoch=FAR_FUTURE_EPOCH,
exit_epoch=FAR_FUTURE_EPOCH,
@@ -1851,36 +1850,26 @@ def get_validator_from_deposit(deposit: Deposit) -> Validator:
```
```python
-def process_deposit(state: BeaconState, deposit: Deposit) -> None:
- # Verify the Merkle branch
- assert is_valid_merkle_branch(
- leaf=hash_tree_root(deposit.data),
- branch=deposit.proof,
- depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
- index=state.eth1_deposit_index,
- root=state.eth1_data.deposit_root,
- )
-
- # Deposits must be processed in order
- state.eth1_deposit_index += 1
-
- pubkey = deposit.data.pubkey
- amount = deposit.data.amount
+def apply_deposit(state: BeaconState,
+ pubkey: BLSPubkey,
+ withdrawal_credentials: Bytes32,
+ amount: uint64,
+ signature: BLSSignature) -> None:
validator_pubkeys = [v.pubkey for v in state.validators]
if pubkey not in validator_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
deposit_message = DepositMessage(
- pubkey=deposit.data.pubkey,
- withdrawal_credentials=deposit.data.withdrawal_credentials,
- amount=deposit.data.amount,
+ pubkey=pubkey,
+ withdrawal_credentials=withdrawal_credentials,
+ amount=amount,
)
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
signing_root = compute_signing_root(deposit_message, domain)
- if not bls.Verify(pubkey, signing_root, deposit.data.signature):
+ if not bls.Verify(pubkey, signing_root, signature):
return
# Add validator and balance entries
- state.validators.append(get_validator_from_deposit(deposit))
+ state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
state.balances.append(amount)
else:
# Increase balance by deposit amount
@@ -1888,6 +1877,29 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
increase_balance(state, index, amount)
```
+```python
+def process_deposit(state: BeaconState, deposit: Deposit) -> None:
+ # Verify the Merkle branch
+ assert is_valid_merkle_branch(
+ leaf=hash_tree_root(deposit.data),
+ branch=deposit.proof,
+ depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in
+ index=state.eth1_deposit_index,
+ root=state.eth1_data.deposit_root,
+ )
+
+ # Deposits must be processed in order
+ state.eth1_deposit_index += 1
+
+ apply_deposit(
+ state=state,
+ pubkey=deposit.data.pubkey,
+ withdrawal_credentials=deposit.data.withdrawal_credentials,
+ amount=deposit.data.amount,
+ signature=deposit.data.signature,
+ )
+```
+
##### Voluntary exits
```python
diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md
index f2ccc24b9d..6e281d5c3d 100644
--- a/specs/phase0/fork-choice.md
+++ b/specs/phase0/fork-choice.md
@@ -8,21 +8,27 @@
- [Introduction](#introduction)
- [Fork choice](#fork-choice)
- [Constant](#constant)
- - [Preset](#preset)
- [Configuration](#configuration)
- [Helpers](#helpers)
- [`LatestMessage`](#latestmessage)
+ - [`is_previous_epoch_justified`](#is_previous_epoch_justified)
- [`Store`](#store)
- [`get_forkchoice_store`](#get_forkchoice_store)
- [`get_slots_since_genesis`](#get_slots_since_genesis)
- [`get_current_slot`](#get_current_slot)
- [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
- [`get_ancestor`](#get_ancestor)
- - [`get_latest_attesting_balance`](#get_latest_attesting_balance)
+ - [`get_weight`](#get_weight)
+ - [`get_voting_source`](#get_voting_source)
- [`filter_block_tree`](#filter_block_tree)
- [`get_filtered_block_tree`](#get_filtered_block_tree)
- [`get_head`](#get_head)
- - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint)
+ - [`update_checkpoints`](#update_checkpoints)
+ - [`update_unrealized_checkpoints`](#update_unrealized_checkpoints)
+ - [Pull-up tip helpers](#pull-up-tip-helpers)
+ - [`compute_pulled_up_tip`](#compute_pulled_up_tip)
+ - [`on_tick` helpers](#on_tick-helpers)
+ - [`on_tick_per_slot`](#on_tick_per_slot)
- [`on_attestation` helpers](#on_attestation-helpers)
- [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time)
- [`validate_on_attestation`](#validate_on_attestation)
@@ -67,12 +73,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass
| -------------------- | ----------- |
| `INTERVALS_PER_SLOT` | `uint64(3)` |
-### Preset
-
-| Name | Value | Unit | Duration |
-| -------------------------------- | ------------ | :---: | :--------: |
-| `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds |
-
### Configuration
| Name | Value |
@@ -92,8 +92,26 @@ class LatestMessage(object):
root: Root
```
+
+### `is_previous_epoch_justified`
+
+```python
+def is_previous_epoch_justified(store: Store) -> bool:
+ current_slot = get_current_slot(store)
+ current_epoch = compute_epoch_at_slot(current_slot)
+ return store.justified_checkpoint.epoch + 1 == current_epoch
+```
+
+
#### `Store`
+The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below:
+
+- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm.
+- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint.
+- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries.
+- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block.
+
```python
@dataclass
class Store(object):
@@ -101,13 +119,15 @@ class Store(object):
genesis_time: uint64
justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
- best_justified_checkpoint: Checkpoint
+ unrealized_justified_checkpoint: Checkpoint
+ unrealized_finalized_checkpoint: Checkpoint
proposer_boost_root: Root
equivocating_indices: Set[ValidatorIndex]
blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
block_states: Dict[Root, BeaconState] = field(default_factory=dict)
checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
+ unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
```
#### `get_forkchoice_store`
@@ -130,12 +150,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
genesis_time=anchor_state.genesis_time,
justified_checkpoint=justified_checkpoint,
finalized_checkpoint=finalized_checkpoint,
- best_justified_checkpoint=justified_checkpoint,
+ unrealized_justified_checkpoint=justified_checkpoint,
+ unrealized_finalized_checkpoint=finalized_checkpoint,
proposer_boost_root=proposer_boost_root,
equivocating_indices=set(),
blocks={anchor_root: copy(anchor_block)},
block_states={anchor_root: copy(anchor_state)},
checkpoint_states={justified_checkpoint: copy(anchor_state)},
+ unrealized_justifications={anchor_root: justified_checkpoint}
)
```
@@ -167,21 +189,20 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
block = store.blocks[root]
if block.slot > slot:
return get_ancestor(store, block.parent_root, slot)
- elif block.slot == slot:
- return root
- else:
- # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
- return root
+ return root
```
-#### `get_latest_attesting_balance`
+#### `get_weight`
```python
-def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
+def get_weight(store: Store, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
- active_indices = get_active_validator_indices(state, get_current_epoch(state))
+ unslashed_and_active_indices = [
+ i for i in get_active_validator_indices(state, get_current_epoch(state))
+ if not state.validators[i].slashed
+ ]
attestation_score = Gwei(sum(
- state.validators[i].effective_balance for i in active_indices
+ state.validators[i].effective_balance for i in unslashed_and_active_indices
if (i in store.latest_messages
and i not in store.equivocating_indices
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
@@ -197,11 +218,32 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
return attestation_score + proposer_score
+```
+
+#### `get_voting_source`
+
+```python
+def get_voting_source(store: Store, block_root: Root) -> Checkpoint:
+ """
+ Compute the voting source checkpoint in the event that the block with root ``block_root`` is the head block
+ """
+ block = store.blocks[block_root]
+ current_epoch = compute_epoch_at_slot(get_current_slot(store))
+ block_epoch = compute_epoch_at_slot(block.slot)
+ if current_epoch > block_epoch:
+ # The block is from a prior epoch, the voting source will be pulled-up
+ return store.unrealized_justifications[block_root]
+ else:
+ # The block is not from a prior epoch, therefore the voting source is not pulled up
+ head_state = store.block_states[block_root]
+ return head_state.current_justified_checkpoint
```
#### `filter_block_tree`
+*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint.root`.
+
```python
def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
block = store.blocks[block_root]
@@ -219,17 +261,29 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB
return True
return False
- # If leaf block, check finalized/justified checkpoints as matching latest.
- head_state = store.block_states[block_root]
+ current_epoch = compute_epoch_at_slot(get_current_slot(store))
+ voting_source = get_voting_source(store, block_root)
+ # The voting source should be at the same height as the store's justified checkpoint
correct_justified = (
store.justified_checkpoint.epoch == GENESIS_EPOCH
- or head_state.current_justified_checkpoint == store.justified_checkpoint
+ or voting_source.epoch == store.justified_checkpoint.epoch
)
+
+ # If the previous epoch is justified, the block should be pulled-up. In this case, check that the unrealized
+ # justification is at least as high as the store's justified checkpoint and that the voting source is not more than two epochs ago
+ if not correct_justified and is_previous_epoch_justified(store):
+ correct_justified = (
+ store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and
+ voting_source.epoch + 2 >= current_epoch
+ )
+
+ finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
correct_finalized = (
store.finalized_checkpoint.epoch == GENESIS_EPOCH
- or head_state.finalized_checkpoint == store.finalized_checkpoint
+ or store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
)
+
# If expected finalized/justified, add to viable block-tree and signal viability to parent.
if correct_justified and correct_finalized:
blocks[block_root] = block
@@ -270,28 +324,83 @@ def get_head(store: Store) -> Root:
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
- head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
+ head = max(children, key=lambda root: (get_weight(store, root), root))
```
-#### `should_update_justified_checkpoint`
+#### `update_checkpoints`
```python
-def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
+def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None:
+ """
+ Update checkpoints in store if necessary
"""
- To address the bouncing attack, only update conflicting justified
- checkpoints in the fork choice if in the early slots of the epoch.
- Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
+ # Update justified checkpoint
+ if justified_checkpoint.epoch > store.justified_checkpoint.epoch:
+ store.justified_checkpoint = justified_checkpoint
- See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
+ # Update finalized checkpoint
+ if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
+ store.finalized_checkpoint = finalized_checkpoint
+```
+
+#### `update_unrealized_checkpoints`
+
+```python
+def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint,
+ unrealized_finalized_checkpoint: Checkpoint) -> None:
"""
- if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
- return True
+ Update unrealized checkpoints in store if necessary
+ """
+ # Update unrealized justified checkpoint
+ if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch:
+ store.unrealized_justified_checkpoint = unrealized_justified_checkpoint
- justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
- if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
- return False
+ # Update unrealized finalized checkpoint
+ if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch:
+ store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint
+```
+
+
+#### Pull-up tip helpers
+
+##### `compute_pulled_up_tip`
+
+```python
+def compute_pulled_up_tip(store: Store, block_root: Root) -> None:
+ state = store.block_states[block_root].copy()
+ # Pull up the post-state of the block to the next epoch boundary
+ process_justification_and_finalization(state)
+
+ store.unrealized_justifications[block_root] = state.current_justified_checkpoint
+ update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
+
+ # If the block is from a prior epoch, apply the realized values
+ block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot)
+ current_epoch = compute_epoch_at_slot(get_current_slot(store))
+ if block_epoch < current_epoch:
+ update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
+```
+
+#### `on_tick` helpers
+
+##### `on_tick_per_slot`
+
+```python
+def on_tick_per_slot(store: Store, time: uint64) -> None:
+ previous_slot = get_current_slot(store)
+
+ # Update store time
+ store.time = time
+
+ current_slot = get_current_slot(store)
- return True
+ # If this is a new slot, reset store.proposer_boost_root
+ if current_slot > previous_slot:
+ store.proposer_boost_root = Root()
+
+ # If a new epoch, pull-up justification and finalization from previous epoch
+ if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
+ update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
```
#### `on_attestation` helpers
@@ -324,7 +433,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc
# Check that the epoch number and slot number are matching
assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
- # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
+ # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found
assert target.root in store.blocks
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
@@ -372,27 +481,13 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn
```python
def on_tick(store: Store, time: uint64) -> None:
- previous_slot = get_current_slot(store)
-
- # update store time
- store.time = time
-
- current_slot = get_current_slot(store)
-
- # Reset store.proposer_boost_root if this is a new slot
- if current_slot > previous_slot:
- store.proposer_boost_root = Root()
-
- # Not a new epoch, return
- if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
- return
-
- # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
- if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
- finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
- ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
- if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
- store.justified_checkpoint = store.best_justified_checkpoint
+ # If the ``store.time`` falls behind, while loop catches up slot by slot
+ # to ensure that every previous slot is processed with ``on_tick_per_slot``
+ tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT
+ while get_current_slot(store) < tick_slot:
+ previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT
+ on_tick_per_slot(store, previous_time)
+ on_tick_per_slot(store, time)
```
#### `on_block`
@@ -415,11 +510,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
# Check the block is valid and compute the post-state
state = pre_state.copy()
+ block_root = hash_tree_root(block)
state_transition(state, signed_block, True)
# Add new block to the store
- store.blocks[hash_tree_root(block)] = block
+ store.blocks[block_root] = block
# Add new state for this block to the store
- store.block_states[hash_tree_root(block)] = state
+ store.block_states[block_root] = state
# Add proposer score boost if the block is timely
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
@@ -427,17 +523,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
if get_current_slot(store) == block.slot and is_before_attesting_interval:
store.proposer_boost_root = hash_tree_root(block)
- # Update justified checkpoint
- if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
- if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
- store.best_justified_checkpoint = state.current_justified_checkpoint
- if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
- store.justified_checkpoint = state.current_justified_checkpoint
+ # Update checkpoints in store if necessary
+ update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
- # Update finalized checkpoint
- if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
- store.finalized_checkpoint = state.finalized_checkpoint
- store.justified_checkpoint = state.current_justified_checkpoint
+ # Eagerly compute unrealized justification and finality
+ compute_pulled_up_tip(store, block_root)
```
#### `on_attestation`
diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt
index 99aab26b29..9b388ed89d 100644
--- a/tests/core/pyspec/eth2spec/VERSION.txt
+++ b/tests/core/pyspec/eth2spec/VERSION.txt
@@ -1 +1 @@
-1.3.0-rc.3
+1.3.0-rc.4
diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
index 63bec26b09..d33e68961d 100644
--- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
@@ -668,10 +668,9 @@ def run_test_single_fork(spec, phases, state, fork):
# Upgrade to post-fork spec, attested block is still before the fork
attested_block = block.copy()
attested_state = state.copy()
- state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False)
+ sync_aggregate, _ = get_sync_aggregate(phases[fork], state)
+ state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate)
spec = phases[fork]
- sync_aggregate, _ = get_sync_aggregate(spec, state)
- block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
assert test.store.finalized_header.beacon.slot == finalized_state.slot
assert test.store.next_sync_committee == finalized_state.next_sync_committee
@@ -755,18 +754,16 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
# ..., attested is from `fork_1`, ...
fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH')
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1)
- state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False)
+ state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch)
spec = phases[fork_1]
- attested_block = state_transition_with_full_block(spec, state, True, True)
attested_state = state.copy()
# ..., and signature is from `fork_2`
fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH')
transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1)
- state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False)
+ sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state)
+ state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate)
spec = phases[fork_2]
- sync_aggregate, _ = get_sync_aggregate(spec, state)
- block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
# Check that update applies
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
diff --git a/tests/core/pyspec/eth2spec/test/conftest.py b/tests/core/pyspec/eth2spec/test/conftest.py
index a5f19e20cb..3026b48eb7 100644
--- a/tests/core/pyspec/eth2spec/test/conftest.py
+++ b/tests/core/pyspec/eth2spec/test/conftest.py
@@ -44,8 +44,11 @@ def pytest_addoption(parser):
help="bls-default: make tests that are not dependent on BLS run without BLS"
)
parser.addoption(
- "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro"],
- help="bls-type: use 'pyecc' or 'milagro' implementation for BLS"
+ "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"],
+ help=(
+ "bls-type: use specified BLS implementation;"
+ "fastest: use milagro for signatures and arkworks for everything else (e.g. KZG)"
+ )
)
@@ -88,5 +91,9 @@ def bls_type(request):
bls_utils.use_py_ecc()
elif bls_type == "milagro":
bls_utils.use_milagro()
+ elif bls_type == "arkworks":
+ bls_utils.use_arkworks()
+ elif bls_type == "fastest":
+ bls_utils.use_fastest()
else:
raise Exception(f"unrecognized bls type: {bls_type}")
diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
index c7fb708b8f..5e65dbd4ef 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
@@ -22,7 +22,7 @@ def test_one_blob(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
- opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
+ opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
@@ -38,7 +38,7 @@ def test_max_blobs(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
- opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
+ opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py
index d9934c5ade..0d7bd53e52 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py
@@ -18,14 +18,13 @@
def _run_validate_blobs(spec, state, blob_count):
block = build_empty_block_for_next_slot(spec, state)
- opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
+ opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
- # Also test the proof generation in `get_blob_sidecars`
- blob_sidecars = spec.get_blob_sidecars(block, blobs)
+ blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs)
blobs = [sidecar.blob for sidecar in blob_sidecars]
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py
index 67dce5c5b3..f42f88393d 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py
@@ -1,32 +1,108 @@
import random
from eth2spec.test.context import (
- spec_state_test,
+ spec_test,
+ single_phase,
with_deneb_and_later,
+ expect_assertion_error
)
from eth2spec.test.helpers.sharding import (
get_sample_blob,
get_poly_in_both_forms,
eval_poly_in_coeff_form,
)
+from eth2spec.utils import bls
+from eth2spec.utils.bls import BLS_MODULUS
+
+G1 = bls.G1_to_bytes48(bls.G1())
+P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
+ "0123456789abcdef0123456789abcdef0123456789abcdef")
+P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
+ "0123456789abcdef0123456789abcdef0123456789abcde0")
+
+
+def bls_add_one(x):
+ """
+ Adds "one" (actually bls.G1()) to a compressed group element.
+ Useful to compute definitely incorrect proofs.
+ """
+ return bls.G1_to_bytes48(
+ bls.add(bls.bytes48_to_G1(x), bls.G1())
+ )
+
+
+def field_element_bytes(x):
+ return int.to_bytes(x % BLS_MODULUS, 32, "little")
@with_deneb_and_later
-@spec_state_test
-def test_verify_kzg_proof(spec, state):
- x = 3
+@spec_test
+@single_phase
+def test_verify_kzg_proof(spec):
+ """
+ Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs.
+ """
+ x = field_element_bytes(3)
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof, y = spec.compute_kzg_proof(blob, x)
+
+ assert spec.verify_kzg_proof(commitment, x, y, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_verify_kzg_proof_incorrect_proof(spec):
+ """
+ Test the wrapper function `verify_kzg_proof` fails on an incorrect proof.
+ """
+ x = field_element_bytes(3465)
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof, y = spec.compute_kzg_proof(blob, x)
+ proof = bls_add_one(proof)
+
+ assert not spec.verify_kzg_proof(commitment, x, y, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_verify_kzg_proof_impl(spec):
+ """
+ Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs.
+ """
+ x = BLS_MODULUS - 1
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
polynomial = spec.blob_to_polynomial(blob)
- proof = spec.compute_kzg_proof_impl(polynomial, x)
+ proof, y = spec.compute_kzg_proof_impl(polynomial, x)
- y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
assert spec.verify_kzg_proof_impl(commitment, x, y, proof)
@with_deneb_and_later
-@spec_state_test
-def test_barycentric_outside_domain(spec, state):
+@spec_test
+@single_phase
+def test_verify_kzg_proof_impl_incorrect_proof(spec):
+ """
+ Test that the implementation function `verify_kzg_proof_impl` fails on an incorrect proof.
+ """
+ x = 324561
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ polynomial = spec.blob_to_polynomial(blob)
+ proof, y = spec.compute_kzg_proof_impl(polynomial, x)
+ proof = bls_add_one(proof)
+
+ assert not spec.verify_kzg_proof_impl(commitment, x, y, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_barycentric_outside_domain(spec):
"""
Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain
(the roots of unity).
@@ -43,9 +119,9 @@ def test_barycentric_outside_domain(spec, state):
for _ in range(n_samples):
# Get a random evaluation point and make sure it's not a root of unity
- z = rng.randint(0, spec.BLS_MODULUS - 1)
+ z = rng.randint(0, BLS_MODULUS - 1)
while z in roots_of_unity_brp:
- z = rng.randint(0, spec.BLS_MODULUS - 1)
+ z = rng.randint(0, BLS_MODULUS - 1)
# Get p(z) by evaluating poly in coefficient form
p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z)
@@ -58,8 +134,9 @@ def test_barycentric_outside_domain(spec, state):
@with_deneb_and_later
-@spec_state_test
-def test_barycentric_within_domain(spec, state):
+@spec_test
+@single_phase
+def test_barycentric_within_domain(spec):
"""
Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain
(the roots of unity).
@@ -90,8 +167,9 @@ def test_barycentric_within_domain(spec, state):
@with_deneb_and_later
-@spec_state_test
-def test_compute_kzg_proof_within_domain(spec, state):
+@spec_test
+@single_phase
+def test_compute_kzg_proof_within_domain(spec):
"""
Create and verify KZG proof that p(z) == y
where z is in the domain of our KZG scheme (i.e. a relevant root of unity).
@@ -103,7 +181,125 @@ def test_compute_kzg_proof_within_domain(spec, state):
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
for i, z in enumerate(roots_of_unity_brp):
- proof = spec.compute_kzg_proof_impl(polynomial, z)
+ proof, y = spec.compute_kzg_proof_impl(polynomial, z)
- y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z)
assert spec.verify_kzg_proof_impl(commitment, z, y, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_verify_blob_kzg_proof(spec):
+ """
+ Test the functions to compute and verify a blob KZG proof
+ """
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+
+ assert spec.verify_blob_kzg_proof(blob, commitment, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_verify_blob_kzg_proof_incorrect_proof(spec):
+ """
+ Check that `verify_blob_kzg_proof` fails on an incorrect proof
+ """
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+ proof = bls_add_one(proof)
+
+ assert not spec.verify_blob_kzg_proof(blob, commitment, proof)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_validate_kzg_g1_generator(spec):
+ """
+ Verify that `validate_kzg_g1` allows the generator G1
+ """
+
+ spec.validate_kzg_g1(bls.G1_to_bytes48(bls.G1()))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_validate_kzg_g1_neutral_element(spec):
+ """
+ Verify that `validate_kzg_g1` allows the neutral element in G1
+ """
+
+ spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1()))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_validate_kzg_g1_not_in_g1(spec):
+ """
+ Verify that `validate_kzg_g1` fails on point not in G1
+ """
+
+ expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_IN_G1))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_validate_kzg_g1_not_on_curve(spec):
+ """
+ Verify that `validate_kzg_g1` fails on a point that is not on the curve
+ """
+
+ expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_ON_CURVE))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_bytes_to_bls_field_zero(spec):
+ """
+ Verify that `bytes_to_bls_field` handles zero
+ """
+
+ spec.bytes_to_bls_field(b"\0" * 32)
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_bytes_to_bls_field_modulus_minus_one(spec):
+ """
+ Verify that `bytes_to_bls_field` handles modulus minus one
+ """
+
+ spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_bytes_to_bls_field_modulus(spec):
+ """
+ Verify that `bytes_to_bls_field` fails on BLS modulus
+ """
+
+ expect_assertion_error(lambda: spec.bytes_to_bls_field(
+ BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)
+ ))
+
+
+@with_deneb_and_later
+@spec_test
+@single_phase
+def test_bytes_to_bls_field_max(spec):
+ """
+ Verify that `bytes_to_bls_field` fails on 2**256 - 1
+ """
+
+ expect_assertion_error(lambda: spec.bytes_to_bls_field(b"\xFF" * 32))
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py
deleted file mode 100644
index 71bfae8b89..0000000000
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py
+++ /dev/null
@@ -1,21 +0,0 @@
-
-from eth2spec.test.helpers.constants import (
- DENEB,
- MINIMAL,
-)
-from eth2spec.test.helpers.sharding import (
- get_sample_blob,
-)
-from eth2spec.test.context import (
- with_phases,
- spec_state_test,
- with_presets,
-)
-
-
-@with_phases([DENEB])
-@spec_state_test
-@with_presets([MINIMAL])
-def test_blob_to_kzg_commitment(spec, state):
- blob = get_sample_blob(spec)
- spec.blob_to_kzg_commitment(blob)
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py
index 13150180bc..3c3b51ff1a 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py
@@ -17,7 +17,7 @@
@spec_state_test
@with_presets([MINIMAL])
def test_tx_peek_blob_versioned_hashes(spec, state):
- otx, blobs, commitments = get_sample_opaque_tx(spec)
+ otx, _, commitments, _ = get_sample_opaque_tx(spec)
data_hashes = spec.tx_peek_blob_versioned_hashes(otx)
expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments]
assert expected == data_hashes
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py
new file mode 100644
index 0000000000..07039ccfeb
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py
@@ -0,0 +1,158 @@
+from eth2spec.test.context import (
+ always_bls,
+ spec_state_test,
+ with_deneb_and_later,
+ expect_assertion_error
+)
+from eth2spec.test.helpers.execution_payload import (
+ compute_el_block_hash,
+)
+from eth2spec.test.helpers.sharding import (
+ get_sample_opaque_tx,
+)
+from eth2spec.test.helpers.block import (
+ build_empty_block_for_next_slot
+)
+from eth2spec.test.helpers.keys import (
+ pubkey_to_privkey
+)
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_validate_blobs_and_kzg_commitments(spec, state):
+ """
+ Test `validate_blobs_and_kzg_commitments`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ spec.validate_blobs_and_kzg_commitments(block.body.execution_payload,
+ blobs,
+ blob_kzg_commitments,
+ proofs)
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state):
+ """
+ Test `validate_blobs_and_kzg_commitments`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ expect_assertion_error(
+ lambda: spec.validate_blobs_and_kzg_commitments(
+ block.body.execution_payload,
+ blobs[:-1],
+ blob_kzg_commitments,
+ proofs
+ )
+ )
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state):
+ """
+ Test `validate_blobs_and_kzg_commitments`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ expect_assertion_error(
+ lambda: spec.validate_blobs_and_kzg_commitments(
+ block.body.execution_payload,
+ blobs,
+ blob_kzg_commitments,
+ proofs[:-1]
+ )
+ )
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state):
+ """
+ Test `validate_blobs_and_kzg_commitments`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:])
+
+ expect_assertion_error(
+ lambda: spec.validate_blobs_and_kzg_commitments(
+ block.body.execution_payload,
+ blobs,
+ blob_kzg_commitments,
+ proofs
+ )
+ )
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_blob_sidecar_signature(spec, state):
+ """
+ Test `get_blob_sidecar_signature`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
+ proposer = state.validators[blob_sidecars[1].proposer_index]
+ privkey = pubkey_to_privkey[proposer.pubkey]
+ sidecar_signature = spec.get_blob_sidecar_signature(state,
+ blob_sidecars[1],
+ privkey)
+
+ signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
+
+ assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
+
+
+@with_deneb_and_later
+@spec_state_test
+@always_bls
+def test_blob_sidecar_signature_incorrect(spec, state):
+ """
+ Test `get_blob_sidecar_signature`
+ """
+ blob_count = 4
+ block = build_empty_block_for_next_slot(spec, state)
+ opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
+ block.body.blob_kzg_commitments = blob_kzg_commitments
+ block.body.execution_payload.transactions = [opaque_tx]
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+
+ blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
+
+ sidecar_signature = spec.get_blob_sidecar_signature(state,
+ blob_sidecars[1],
+ 123)
+
+ signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
+
+ assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py
index c60d047b92..360e194f59 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py
@@ -187,7 +187,7 @@ def add_attestations_to_state(spec, state, attestations, slot):
spec.process_attestation(state, attestation)
-def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
+def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
for index in range(committees_per_slot):
def participants_filter(comm):
@@ -262,7 +262,7 @@ def state_transition_with_full_block(spec,
if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
- attestations = _get_valid_attestation_at_slot(
+ attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
@@ -272,7 +272,7 @@ def state_transition_with_full_block(spec,
block.body.attestations.append(attestation)
if fill_prev_epoch:
slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
- attestations = _get_valid_attestation_at_slot(
+ attestations = get_valid_attestation_at_slot(
state,
spec,
slot_to_attest,
@@ -300,7 +300,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
slots = state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(slots):
target_slot = state.slot - slot_offset
- attestations += _get_valid_attestation_at_slot(
+ attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,
@@ -311,7 +311,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f
slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH
for slot_offset in range(1, slots):
target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset
- attestations += _get_valid_attestation_at_slot(
+ attestations += get_valid_attestation_at_slot(
state,
spec,
target_slot,
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
index bd8abd95b5..af231d87ff 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
@@ -3,6 +3,7 @@
from eth2spec.test.helpers.attestations import (
next_epoch_with_attestations,
next_slots_with_attestations,
+ state_transition_with_full_block,
)
@@ -16,12 +17,13 @@ def get_anchor_root(spec, state):
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True,
merge_block=False, block_not_found=False, is_optimistic=False):
pre_state = store.block_states[signed_block.message.parent_root]
- block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
if merge_block:
assert spec.is_merge_transition_block(pre_state, signed_block.message.body)
- if store.time < block_time:
- on_tick_and_append_step(spec, store, block_time, test_steps)
+ block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
+ while store.time < block_time:
+ time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT
+ on_tick_and_append_step(spec, store, time, test_steps)
post_state = yield from add_block(
spec, store, signed_block, test_steps,
@@ -39,6 +41,11 @@ def add_attestation(spec, store, attestation, test_steps, is_from_block=False):
test_steps.append({'attestation': get_attestation_file_name(attestation)})
+def add_attestations(spec, store, attestations, test_steps, is_from_block=False):
+ for attestation in attestations:
+ yield from add_attestation(spec, store, attestation, test_steps, is_from_block=is_from_block)
+
+
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
@@ -90,6 +97,7 @@ def get_attester_slashing_file_name(attester_slashing):
def on_tick_and_append_step(spec, store, time, test_steps):
spec.on_tick(store, time)
test_steps.append({'tick': int(time)})
+ output_store_checks(spec, store, test_steps)
def run_on_block(spec, store, signed_block, valid=True):
@@ -153,25 +161,7 @@ def add_block(spec,
assert store.blocks[block_root] == signed_block.message
assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root
if not is_optimistic:
- test_steps.append({
- 'checks': {
- 'time': int(store.time),
- 'head': get_formatted_head_output(spec, store),
- 'justified_checkpoint': {
- 'epoch': int(store.justified_checkpoint.epoch),
- 'root': encode_hex(store.justified_checkpoint.root),
- },
- 'finalized_checkpoint': {
- 'epoch': int(store.finalized_checkpoint.epoch),
- 'root': encode_hex(store.finalized_checkpoint.root),
- },
- 'best_justified_checkpoint': {
- 'epoch': int(store.best_justified_checkpoint.epoch),
- 'root': encode_hex(store.best_justified_checkpoint.root),
- },
- 'proposer_boost_root': encode_hex(store.proposer_boost_root),
- }
- })
+ output_store_checks(spec, store, test_steps)
return store.block_states[signed_block.message.hash_tree_root()]
@@ -217,6 +207,32 @@ def get_formatted_head_output(spec, store):
}
+def output_head_check(spec, store, test_steps):
+ test_steps.append({
+ 'checks': {
+ 'head': get_formatted_head_output(spec, store),
+ }
+ })
+
+
+def output_store_checks(spec, store, test_steps):
+ test_steps.append({
+ 'checks': {
+ 'time': int(store.time),
+ 'head': get_formatted_head_output(spec, store),
+ 'justified_checkpoint': {
+ 'epoch': int(store.justified_checkpoint.epoch),
+ 'root': encode_hex(store.justified_checkpoint.root),
+ },
+ 'finalized_checkpoint': {
+ 'epoch': int(store.finalized_checkpoint.epoch),
+ 'root': encode_hex(store.finalized_checkpoint.root),
+ },
+ 'proposer_boost_root': encode_hex(store.proposer_boost_root),
+ }
+ })
+
+
def apply_next_epoch_with_attestations(spec,
state,
store,
@@ -263,6 +279,39 @@ def apply_next_slots_with_attestations(spec,
return post_state, store, last_signed_block
+def is_ready_to_justify(spec, state):
+ """
+ Check if the given ``state`` will trigger justification updates at epoch boundary.
+ """
+ temp_state = state.copy()
+ spec.process_justification_and_finalization(temp_state)
+ return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch
+
+
+def find_next_justifying_slot(spec,
+ state,
+ fill_cur_epoch,
+ fill_prev_epoch,
+ participation_fn=None):
+ temp_state = state.copy()
+
+ signed_blocks = []
+ justifying_slot = None
+ while justifying_slot is None:
+ signed_block = state_transition_with_full_block(
+ spec,
+ temp_state,
+ fill_cur_epoch,
+ fill_prev_epoch,
+ participation_fn,
+ )
+ signed_blocks.append(signed_block)
+ if is_ready_to_justify(spec, temp_state):
+ justifying_slot = temp_state.slot
+
+ return signed_blocks, justifying_slot
+
+
def get_pow_block_file_name(pow_block):
return f"pow_block_{encode_hex(pow_block.block_hash)}"
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
index 96d0d20dcd..241c7dc37e 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py
@@ -47,6 +47,7 @@ def _set_operations_by_dict(block, operation_dict):
def _state_transition_and_sign_block_at_slot(spec,
state,
+ sync_aggregate=None,
operation_dict=None):
"""
Cribbed from ``transition_unsigned_block`` helper
@@ -61,6 +62,8 @@ def _state_transition_and_sign_block_at_slot(spec,
Thus use dict to pass operations.
"""
block = build_empty_block(spec, state)
+ if sync_aggregate is not None:
+ block.body.sync_aggregate = sync_aggregate
if operation_dict:
_set_operations_by_dict(block, operation_dict)
@@ -141,7 +144,7 @@ def state_transition_across_slots_with_ignoring_proposers(spec,
next_slot(spec, state)
-def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None):
+def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
spec.process_slots(state, state.slot + 1)
assert state.slot % spec.SLOTS_PER_EPOCH == 0
@@ -172,7 +175,12 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
if with_block:
- return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
+ return state, _state_transition_and_sign_block_at_slot(
+ post_spec,
+ state,
+ sync_aggregate=sync_aggregate,
+ operation_dict=operation_dict,
+ )
else:
return state, None
diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
index 215d174fc8..ceca145e94 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
@@ -31,7 +31,7 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
signature_state,
- signature_slot,
+ max(signature_slot, 1) - 1,
committee_indices[:num_participants],
)
sync_aggregate = spec.SyncAggregate(
diff --git a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py
index 6f42aa9bad..816c7a10b7 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py
@@ -177,7 +177,7 @@ def get_opt_head_block_root(spec, mega_store):
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
- head = max(children, key=lambda root: (spec.get_latest_attesting_balance(store, root), root))
+ head = max(children, key=lambda root: (spec.get_weight(store, root), root))
def is_invalidated(mega_store, block_root):
diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py
index fd60d5d3be..6b913b90ec 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py
@@ -12,7 +12,7 @@
#
-# Containers from Deneb
+# Containers from EIP-4844
#
MAX_CALLDATA_SIZE = 2**24
MAX_VERSIONED_HASHES_LIST_SIZE = 2**24
@@ -101,13 +101,16 @@ def get_poly_in_both_forms(spec, rng=None):
def get_sample_opaque_tx(spec, blob_count=1, rng=None):
blobs = []
blob_kzg_commitments = []
+ blob_kzg_proofs = []
blob_versioned_hashes = []
for _ in range(blob_count):
blob = get_sample_blob(spec, rng)
blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
+ blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
blobs.append(blob)
blob_kzg_commitments.append(blob_commitment)
+ blob_kzg_proofs.append(blob_kzg_proof)
blob_versioned_hashes.append(blob_versioned_hash)
signed_blob_tx = SignedBlobTransaction(
@@ -117,4 +120,4 @@ def get_sample_opaque_tx(spec, blob_count=1, rng=None):
)
serialized_tx = serialize(signed_blob_tx)
opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx
- return opaque_tx, blobs, blob_kzg_commitments
+ return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs
diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py
index 0a145dfa52..15feffa83d 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py
@@ -1,7 +1,7 @@
from eth2spec.test.context import (
MAINNET,
spec_state_test,
- with_all_phases,
+ with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.attestations import (
@@ -31,7 +31,7 @@ def _apply_base_block_a(spec, state, store, test_steps):
assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_ex_ante_vanilla(spec, state):
"""
@@ -118,7 +118,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro
return proposer_score // base_effective_balance + 1
-@with_all_phases
+@with_altair_and_later
@with_presets([MAINNET], reason="to create non-duplicate committee")
@spec_state_test
def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state):
@@ -191,7 +191,7 @@ def _filter_participant_set(participants):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_ex_ante_sandwich_without_attestations(spec, state):
"""
@@ -254,7 +254,7 @@ def test_ex_ante_sandwich_without_attestations(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_ex_ante_sandwich_with_honest_attestation(spec, state):
"""
@@ -335,7 +335,7 @@ def _filter_participant_set(participants):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@with_presets([MAINNET], reason="to create non-duplicate committee")
@spec_state_test
def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state):
diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py
index 990c420313..f5960ff703 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py
@@ -1,9 +1,8 @@
import random
-from eth_utils import encode_hex
from eth2spec.test.context import (
spec_state_test,
- with_all_phases,
+ with_altair_and_later,
with_presets,
)
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
@@ -22,6 +21,8 @@
add_attestation,
tick_and_run_on_attestation,
tick_and_add_block,
+ output_head_check,
+ apply_next_epoch_with_attestations,
)
from eth2spec.test.helpers.forks import (
is_post_altair,
@@ -36,7 +37,7 @@
rng = random.Random(1001)
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_genesis(spec, state):
test_steps = []
@@ -60,7 +61,7 @@ def test_genesis(spec, state):
yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_chain_no_attestations(spec, state):
test_steps = []
@@ -71,11 +72,7 @@ def test_chain_no_attestations(spec, state):
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# On receiving a block of `GENESIS_SLOT + 1` slot
block_1 = build_empty_block_for_next_slot(spec, state)
@@ -88,16 +85,12 @@ def test_chain_no_attestations(spec, state):
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(block_2)
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_split_tie_breaker_no_attestations(spec, state):
test_steps = []
@@ -109,11 +102,7 @@ def test_split_tie_breaker_no_attestations(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# Create block at slot 1
block_1_state = genesis_state.copy()
@@ -135,16 +124,12 @@ def test_split_tie_breaker_no_attestations(spec, state):
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
assert spec.get_head(store) == highest_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_shorter_chain_but_heavier_weight(spec, state):
test_steps = []
@@ -156,11 +141,7 @@ def test_shorter_chain_but_heavier_weight(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# build longer tree
long_state = genesis_state.copy()
@@ -183,16 +164,12 @@ def test_shorter_chain_but_heavier_weight(spec, state):
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(short_block)
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_filtered_block_tree(spec, state):
@@ -203,11 +180,7 @@ def test_filtered_block_tree(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# transition state past initial couple of epochs
next_epoch(spec, state)
@@ -227,13 +200,7 @@ def test_filtered_block_tree(spec, state):
# the last block in the branch should be the head
expected_head_root = spec.hash_tree_root(signed_blocks[-1].message)
assert spec.get_head(store) == expected_head_root
-
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- 'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
- }
- })
+ output_head_check(spec, store, test_steps)
#
# create branch containing the justified block but not containing enough on
@@ -274,16 +241,12 @@ def test_filtered_block_tree(spec, state):
# ensure that get_head still returns the head from the previous branch
assert spec.get_head(store) == expected_head_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store)
- }
- })
+ output_head_check(spec, store, test_steps)
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_proposer_boost_correct_head(spec, state):
test_steps = []
@@ -295,11 +258,7 @@ def test_proposer_boost_correct_head(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# Build block that serves as head ONLY on timely arrival, and ONLY in that slot
state_1 = genesis_state.copy()
@@ -337,19 +296,14 @@ def test_proposer_boost_correct_head(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
assert spec.get_head(store) == spec.hash_tree_root(block_2)
-
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
-def test_discard_equivocations(spec, state):
+def test_discard_equivocations_on_attester_slashing(spec, state):
test_steps = []
genesis_state = state.copy()
@@ -359,11 +313,7 @@ def test_discard_equivocations(spec, state):
yield 'anchor_block', anchor_block
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ output_head_check(spec, store, test_steps)
# Build block that serves as head before discarding equivocations
state_1 = genesis_state.copy()
@@ -418,11 +368,359 @@ def test_discard_equivocations(spec, state):
# The head should revert to block_2
yield from add_attester_slashing(spec, store, attester_slashing, test_steps)
assert spec.get_head(store) == spec.hash_tree_root(block_2)
+ output_head_check(spec, store, test_steps)
- test_steps.append({
- 'checks': {
- 'head': get_formatted_head_output(spec, store),
- }
- })
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_discard_equivocations_slashed_validator_censoring(spec, state):
+ # Check that the store does not count LMD votes from validators that are slashed in the justified state
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0
+ assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
+
+ # We will slash all validators voting at the 2nd slot of epoch 0
+ current_slot = spec.get_current_slot(store)
+ eqv_slot = current_slot + 1
+ eqv_epoch = spec.compute_epoch_at_slot(eqv_slot)
+ assert eqv_slot % spec.SLOTS_PER_EPOCH == 1
+ assert eqv_epoch == 0
+ slashed_validators = []
+ comm_count = spec.get_committee_count_per_slot(state, eqv_epoch)
+ for comm_index in range(comm_count):
+ comm = spec.get_beacon_committee(state, eqv_slot, comm_index)
+ slashed_validators += comm
+ assert len(slashed_validators) > 0
+
+ # Slash those validators in the state
+ for val_index in slashed_validators:
+ state.validators[val_index].slashed = True
+
+ # Store this state as the anchor state
+ anchor_state = state.copy()
+ # Generate an anchor block with correct state root
+ anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root())
+ yield 'anchor_state', anchor_state
+ yield 'anchor_block', anchor_block
+
+ # Get a new store with the anchor state & anchor block
+ store = spec.get_forkchoice_store(anchor_state, anchor_block)
+
+ # Now generate the store checks
+ current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ # Create two competing blocks at eqv_slot
+ next_slots(spec, state, eqv_slot - state.slot - 1)
+ assert state.slot == eqv_slot - 1
+
+ state_1 = state.copy()
+ block_1 = build_empty_block_for_next_slot(spec, state_1)
+ signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1)
+
+ state_2 = state.copy()
+ block_2 = build_empty_block_for_next_slot(spec, state_2)
+ block_2.body.graffiti = block_2.body.graffiti = b'\x42' * 32
+ signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2)
+
+ assert block_1.slot == block_2.slot == eqv_slot
+
+ # Add both blocks to the store
+ yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
+ yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
+
+ # Find out which block will win in tie breaking
+ if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2):
+ block_low_root = block_1.hash_tree_root()
+ block_low_root_post_state = state_1
+ block_high_root = block_2.hash_tree_root()
+ else:
+ block_low_root = block_2.hash_tree_root()
+ block_low_root_post_state = state_2
+ block_high_root = block_1.hash_tree_root()
+ assert block_low_root < block_high_root
+
+ # Tick to next slot so proposer boost does not apply
+ current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+
+ # Check that block with higher root wins
+ assert spec.get_head(store) == block_high_root
+
+ # Create attestation for block with lower root
+ attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True)
+ # Check that all attesting validators were slashed in the anchor state
+ att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0)
+ for i in att_comm:
+ assert anchor_state.validators[i].slashed
+ # Add attestation to the store
+ yield from add_attestation(spec, store, attestation, test_steps)
+ # Check that block with higher root still wins
+ assert spec.get_head(store) == block_high_root
+ output_head_check(spec, store, test_steps)
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_voting_source_within_two_epoch(spec, state):
+ """
+ Check that the store allows for a head block that has:
+ - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
+ - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
+ - store.voting_source[block_root].epoch + 2 >= current_epoch, and
+ - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Copy the state to use later
+ fork_state = state.copy()
+
+ # Fill epoch 4
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
+ assert store.finalized_checkpoint.epoch == 3
+
+ # Create a fork from the earlier saved state
+ next_epoch(spec, fork_state)
+ assert spec.compute_epoch_at_slot(fork_state.slot) == 5
+ _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
+ # Only keep the blocks from epoch 5, so discard the last generated block
+ signed_blocks = signed_blocks[:-1]
+ last_fork_block = signed_blocks[-1].message
+ assert spec.compute_epoch_at_slot(last_fork_block.slot) == 5
+
+ # Now add the fork to the store
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
+ assert store.finalized_checkpoint.epoch == 3
+
+ # Check that the last block from the fork is the head
+ # LMD votes for the competing branch are overwritten so this fork should win
+ last_fork_block_root = last_fork_block.hash_tree_root()
+ # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
+ assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
+ # assert store.voting_source[last_fork_block_root].epoch + 2 >= \
+ # spec.compute_epoch_at_slot(spec.get_current_slot(store))
+ finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
+ assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot)
+ assert spec.get_head(store) == last_fork_block_root
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_voting_source_beyond_two_epoch(spec, state):
+ """
+ Check that the store doesn't allow for a head block that has:
+ - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and
+ - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and
+ - store.voting_source[block_root].epoch + 2 < current_epoch, and
+ - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot)
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Copy the state to use later
+ fork_state = state.copy()
+
+ # Fill epoch 4 and 5
+ for _ in range(2):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+ assert store.finalized_checkpoint.epoch == 4
+
+ # Create a fork from the earlier saved state
+ for _ in range(2):
+ next_epoch(spec, fork_state)
+ assert spec.compute_epoch_at_slot(fork_state.slot) == 6
+ assert fork_state.current_justified_checkpoint.epoch == 3
+ _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True)
+ # Only keep the blocks from epoch 6, so discard the last generated block
+ signed_blocks = signed_blocks[:-1]
+ last_fork_block = signed_blocks[-1].message
+ assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6
+
+ # Store the head before adding the fork to the store
+ correct_head = spec.get_head(store)
+
+ # Now add the fork to the store
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+ assert store.finalized_checkpoint.epoch == 4
+
+ last_fork_block_root = last_fork_block.hash_tree_root()
+ last_fork_block_state = store.block_states[last_fork_block_root]
+ assert last_fork_block_state.current_justified_checkpoint.epoch == 3
+
+ # Check that the head is unchanged
+ # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch
+ assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch
+ # assert store.voting_source[last_fork_block_root].epoch + 2 < \
+ # spec.compute_epoch_at_slot(spec.get_current_slot(store))
+ finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
+ assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot)
+ assert spec.get_head(store) == correct_head
+
+ yield 'steps', test_steps
+
+
+"""
+Note:
+We are unable to generate test vectors that check failure of the correct_finalized condition.
+We cannot generate a block that:
+- has !correct_finalized, and
+- has correct_justified, and
+- is a descendant of store.justified_checkpoint.root
+
+The block being a descendant of store.justified_checkpoint.root is necessary because
+filter_block_tree descends the tree starting at store.justified_checkpoint.root
+
+@with_altair_and_later
+@spec_state_test
+def test_incorrect_finalized(spec, state):
+ # Check that the store doesn't allow for a head block that has:
+ # - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and
+ # - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and
+ # - store.finalized_checkpoint.root != get_ancestor(store, block_root, finalized_slot)
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 4
+ for _ in range(4):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
+ assert store.finalized_checkpoint.epoch == 3
+
+ # Identify the fork block as the last block in epoch 4
+ fork_block_root = state.latest_block_header.parent_root
+ fork_block = store.blocks[fork_block_root]
+ assert spec.compute_epoch_at_slot(fork_block.slot) == 4
+ # Copy the state to use later
+ fork_state = store.block_states[fork_block_root].copy()
+ assert spec.compute_epoch_at_slot(fork_state.slot) == 4
+ assert fork_state.current_justified_checkpoint.epoch == 3
+ assert fork_state.finalized_checkpoint.epoch == 2
+
+ # Create a fork from the earlier saved state
+ for _ in range(2):
+ next_epoch(spec, fork_state)
+ assert spec.compute_epoch_at_slot(fork_state.slot) == 6
+ assert fork_state.current_justified_checkpoint.epoch == 4
+ assert fork_state.finalized_checkpoint.epoch == 3
+ # Fill epoch 6
+ signed_blocks = []
+ _, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
+ signed_blocks += signed_blocks_1
+ assert spec.compute_epoch_at_slot(fork_state.slot) == 7
+ # Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork
+ assert fork_state.current_justified_checkpoint.epoch == 6
+ assert fork_state.finalized_checkpoint.epoch == 3
+ # Create a chain in epoch 7 that has new justification for epoch 7
+ _, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False)
+ # Only keep the blocks from epoch 7, so discard the last generated block
+ signed_blocks_2 = signed_blocks_2[:-1]
+ signed_blocks += signed_blocks_2
+ last_fork_block = signed_blocks[-1].message
+ assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7
+
+ # Now add the fork to the store
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+ assert store.justified_checkpoint.epoch == 6
+ assert store.finalized_checkpoint.epoch == 3
+
+ # Fill epoch 5 and 6 in the original chain
+ for _ in range(2):
+ state, store, signed_head_block = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, False, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6
+ assert store.finalized_checkpoint.epoch == 5
+ # Store the expected head
+ head_root = signed_head_block.message.hash_tree_root()
+
+ # Check that the head is unchanged
+ last_fork_block_root = last_fork_block.hash_tree_root()
+ assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch
+ assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH
+ finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
+ assert store.finalized_checkpoint.root != spec.get_ancestor(store, last_fork_block_root, finalized_slot)
+ assert spec.get_head(store) != last_fork_block_root
+ assert spec.get_head(store) == head_root
yield 'steps', test_steps
+"""
diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py
index eede246302..0af7753391 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py
@@ -2,12 +2,15 @@
from eth_utils import encode_hex
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
-from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets
+from eth2spec.test.context import (
+ MINIMAL,
+ spec_state_test,
+ with_altair_and_later,
+ with_presets
+)
from eth2spec.test.helpers.attestations import (
next_epoch_with_attestations,
next_slots_with_attestations,
- state_transition_with_full_block,
- state_transition_with_full_attestations_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot,
@@ -22,6 +25,8 @@
tick_and_add_block,
apply_next_epoch_with_attestations,
apply_next_slots_with_attestations,
+ is_ready_to_justify,
+ find_next_justifying_slot,
)
from eth2spec.test.helpers.state import (
next_epoch,
@@ -41,7 +46,7 @@ def _drop_random_one_third(_slot, _index, indices):
return rng.sample(sorted(indices), participant_count)
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_basic(spec, state):
test_steps = []
@@ -71,7 +76,7 @@ def test_basic(spec, state):
# TODO: add tests for justified_root and finalized_root
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_on_block_checkpoints(spec, state):
@@ -108,7 +113,7 @@ def test_on_block_checkpoints(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_on_block_future_block(spec, state):
test_steps = []
@@ -129,7 +134,7 @@ def test_on_block_future_block(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_on_block_bad_parent_root(spec, state):
test_steps = []
@@ -155,7 +160,7 @@ def test_on_block_bad_parent_root(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_on_block_before_finalized(spec, state):
@@ -187,7 +192,7 @@ def test_on_block_before_finalized(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_on_block_finalized_skip_slots(spec, state):
@@ -234,7 +239,7 @@ def test_on_block_finalized_skip_slots(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
@@ -280,301 +285,22 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
yield 'steps', test_steps
-@with_all_phases
-@spec_state_test
-@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys")
-def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
- """
- Test `should_update_justified_checkpoint`:
- compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- """
- test_steps = []
- # Initialization
- store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
- yield 'anchor_state', state
- yield 'anchor_block', anchor_block
- current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
- on_tick_and_append_step(spec, store, current_time, test_steps)
- assert store.time == current_time
-
- # Skip epoch 0 & 1
- for _ in range(2):
- next_epoch(spec, state)
- # Fill epoch 2
- state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, state, store, True, False, test_steps=test_steps)
- assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
- assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
- # Skip epoch 3 & 4
- for _ in range(2):
- next_epoch(spec, state)
- # Epoch 5: Attest current epoch
- state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, state, store, True, False, participation_fn=_drop_random_one_third, test_steps=test_steps)
- assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
- assert state.current_justified_checkpoint.epoch == 2
- assert store.justified_checkpoint.epoch == 2
- assert state.current_justified_checkpoint == store.justified_checkpoint
-
- # Skip epoch 6
- next_epoch(spec, state)
-
- pre_state = state.copy()
-
- # Build a block to justify epoch 5
- signed_block = state_transition_with_full_block(spec, state, True, True)
- assert state.finalized_checkpoint.epoch == 0
- assert state.current_justified_checkpoint.epoch == 5
- assert state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
- assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- # Run on_block
- yield from tick_and_add_block(spec, store, signed_block, test_steps)
- # Ensure justified_checkpoint has been changed but finality is unchanged
- assert store.justified_checkpoint.epoch == 5
- assert store.justified_checkpoint == state.current_justified_checkpoint
- assert store.finalized_checkpoint.epoch == pre_state.finalized_checkpoint.epoch == 0
-
- yield 'steps', test_steps
-
-
-@with_all_phases
-@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch")
-@spec_state_test
-def test_on_block_outside_safe_slots_but_finality(spec, state):
- """
- Test `should_update_justified_checkpoint` case
- - compute_slots_since_epoch_start(get_current_slot(store)) > SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- - new_justified_checkpoint and store.justified_checkpoint.root are NOT conflicting
-
- Thus should_update_justified_checkpoint returns True.
-
- Part of this script is similar to `test_new_justified_is_later_than_store_justified`.
- """
- test_steps = []
- # Initialization
- store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
- yield 'anchor_state', state
- yield 'anchor_block', anchor_block
- current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
- on_tick_and_append_step(spec, store, current_time, test_steps)
- assert store.time == current_time
-
- # Skip epoch 0
- next_epoch(spec, state)
- # Fill epoch 1 to 3, attest current epoch
- for _ in range(3):
- state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, state, store, True, False, test_steps=test_steps)
- assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
- assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
-
- # Skip epoch 4-6
- for _ in range(3):
- next_epoch(spec, state)
-
- # epoch 7
- state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, state, store, True, True, test_steps=test_steps)
- assert state.finalized_checkpoint.epoch == 2
- assert state.current_justified_checkpoint.epoch == 7
-
- # epoch 8, attest the first 5 blocks
- state, store, _ = yield from apply_next_slots_with_attestations(
- spec, state, store, 5, True, True, test_steps)
- assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
- assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7
-
- # Propose a block at epoch 9, 5th slot
- next_epoch(spec, state)
- next_slots(spec, state, 4)
- signed_block = state_transition_with_full_attestations_block(spec, state, True, True)
- yield from tick_and_add_block(spec, store, signed_block, test_steps)
- assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
- assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7
-
- # Propose an empty block at epoch 10, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
- # This block would trigger justification and finality updates on store
- next_epoch(spec, state)
- next_slots(spec, state, 4)
- block = build_empty_block_for_next_slot(spec, state)
- signed_block = state_transition_and_sign_block(spec, state, block)
- assert state.finalized_checkpoint.epoch == 7
- assert state.current_justified_checkpoint.epoch == 8
- # Step time past safe slots and run on_block
- if store.time < spec.compute_time_at_slot(state, signed_block.message.slot):
- time = store.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
- on_tick_and_append_step(spec, store, time, test_steps)
- assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- yield from add_block(spec, store, signed_block, test_steps)
-
- # Ensure justified_checkpoint finality has been changed
- assert store.finalized_checkpoint.epoch == 7
- assert store.finalized_checkpoint == state.finalized_checkpoint
- assert store.justified_checkpoint.epoch == 8
- assert store.justified_checkpoint == state.current_justified_checkpoint
-
- yield 'steps', test_steps
-
-
-@with_all_phases
-@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch")
-@spec_state_test
-def test_new_justified_is_later_than_store_justified(spec, state):
- """
- J: Justified
- F: Finalized
- fork_1_state (forked from genesis):
- epoch
- [0] <- [1] <- [2] <- [3] <- [4]
- F J
-
- fork_2_state (forked from fork_1_state's epoch 2):
- epoch
- └──── [3] <- [4] <- [5] <- [6]
- F J
-
- fork_3_state (forked from genesis):
- [0] <- [1] <- [2] <- [3] <- [4] <- [5]
- F J
- """
- # The 1st fork, from genesis
- fork_1_state = state.copy()
- # The 3rd fork, from genesis
- fork_3_state = state.copy()
-
- test_steps = []
- # Initialization
- store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
- yield 'anchor_state', state
- yield 'anchor_block', anchor_block
- current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
- on_tick_and_append_step(spec, store, current_time, test_steps)
- assert store.time == current_time
-
- # ----- Process fork_1_state
- # Skip epoch 0
- next_epoch(spec, fork_1_state)
- # Fill epoch 1 with previous epoch attestations
- fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, fork_1_state, store, False, True, test_steps=test_steps)
-
- # Fork `fork_2_state` at the start of epoch 2
- fork_2_state = fork_1_state.copy()
- assert spec.get_current_epoch(fork_2_state) == 2
-
- # Skip epoch 2
- next_epoch(spec, fork_1_state)
- # # Fill epoch 3 & 4 with previous epoch attestations
- for _ in range(2):
- fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
- spec, fork_1_state, store, False, True, test_steps=test_steps)
-
- assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
- assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
- assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint
-
- # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint
- # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch`
- all_blocks = []
-
- # Proposed an empty block at epoch 2, 1st slot
- block = build_empty_block_for_next_slot(spec, fork_2_state)
- signed_block = state_transition_and_sign_block(spec, fork_2_state, block)
- yield from tick_and_add_block(spec, store, signed_block, test_steps)
- assert fork_2_state.current_justified_checkpoint.epoch == 0
-
- # Skip to epoch 4
- for _ in range(2):
- next_epoch(spec, fork_2_state)
- assert fork_2_state.current_justified_checkpoint.epoch == 0
-
- # Propose a block at epoch 4, 5th slot
- # Propose a block at epoch 5, 5th slot
- for _ in range(2):
- next_epoch(spec, fork_2_state)
- next_slots(spec, fork_2_state, 4)
- signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
- yield from tick_and_add_block(spec, store, signed_block, test_steps)
- assert fork_2_state.current_justified_checkpoint.epoch == 0
-
- # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
- next_epoch(spec, fork_2_state)
- next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2)
- signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
- assert fork_2_state.finalized_checkpoint.epoch == 0
- assert fork_2_state.current_justified_checkpoint.epoch == 5
- # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- time = store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT
- on_tick_and_append_step(spec, store, time, test_steps)
- assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
- # Run on_block
- yield from add_block(spec, store, signed_block, test_steps)
- assert store.finalized_checkpoint.epoch == 0
- assert store.justified_checkpoint.epoch == 3
- assert store.best_justified_checkpoint.epoch == 5
-
- # ------ fork_3_state: Create another chain to test the
- # "Update justified if new justified is later than store justified" case
- all_blocks = []
- for _ in range(3):
- next_epoch(spec, fork_3_state)
-
- # epoch 3
- _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True)
- all_blocks += signed_blocks
- assert fork_3_state.finalized_checkpoint.epoch == 0
-
- # epoch 4, attest the first 5 blocks
- _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True)
- all_blocks += blocks.copy()
- assert fork_3_state.finalized_checkpoint.epoch == 0
-
- # Propose a block at epoch 5, 5th slot
- next_epoch(spec, fork_3_state)
- next_slots(spec, fork_3_state, 4)
- signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
- all_blocks.append(signed_block.copy())
- assert fork_3_state.finalized_checkpoint.epoch == 0
-
- # Propose a block at epoch 6, 5th slot
- next_epoch(spec, fork_3_state)
- next_slots(spec, fork_3_state, 4)
- signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
- all_blocks.append(signed_block.copy())
- assert fork_3_state.finalized_checkpoint.epoch == 3
- assert fork_3_state.current_justified_checkpoint.epoch == 4
-
- # Apply blocks of `fork_3_state` to `store`
- for block in all_blocks:
- if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
- time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT
- on_tick_and_append_step(spec, store, time, test_steps)
- yield from add_block(spec, store, block, test_steps)
-
- assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint
- assert store.justified_checkpoint == fork_3_state.current_justified_checkpoint
- assert store.justified_checkpoint != store.best_justified_checkpoint
- assert store.best_justified_checkpoint == fork_2_state.current_justified_checkpoint
-
- yield 'steps', test_steps
-
-
-@with_all_phases
+"""
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state):
- """
- J: Justified
- F: Finalized
- state (forked from genesis):
- epoch
- [0] <- [1] <- [2] <- [3] <- [4] <- [5]
- F J
+ # J: Justified
+ # F: Finalized
+ # state (forked from genesis):
+ # epoch
+ # [0] <- [1] <- [2] <- [3] <- [4] <- [5]
+ # F J
+
+ # another_state (forked from epoch 0):
+ # └──── [1] <- [2] <- [3] <- [4] <- [5]
+ # F J
- another_state (forked from epoch 0):
- └──── [1] <- [2] <- [3] <- [4] <- [5]
- F J
- """
test_steps = []
# Initialization
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
@@ -631,12 +357,18 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state):
assert ancestor_at_finalized_slot != store.finalized_checkpoint.root
assert store.finalized_checkpoint == another_state.finalized_checkpoint
- assert store.justified_checkpoint == another_state.current_justified_checkpoint
+
+ # NOTE: inconsistent justified/finalized checkpoints in this edge case.
+ # This can only happen when >1/3 validators are slashable, as this testcase requires that
+ # store.justified_checkpoint is higher than store.finalized_checkpoint and on a different branch.
+ # Ignoring this testcase for now.
+ assert store.justified_checkpoint != another_state.current_justified_checkpoint
yield 'steps', test_steps
+"""
-@with_all_phases
+@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state):
@@ -701,12 +433,14 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state):
assert ancestor_at_finalized_slot == store.finalized_checkpoint.root
assert store.finalized_checkpoint == another_state.finalized_checkpoint
- assert store.justified_checkpoint == another_state.current_justified_checkpoint
+
+ # NOTE: inconsistent justified/finalized checkpoints in this edge case
+ assert store.justified_checkpoint != another_state.current_justified_checkpoint
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_proposer_boost(spec, state):
test_steps = []
@@ -729,14 +463,14 @@ def test_proposer_boost(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
yield from add_block(spec, store, signed_block, test_steps)
assert store.proposer_boost_root == spec.hash_tree_root(block)
- assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0
+ assert spec.get_weight(store, spec.hash_tree_root(block)) > 0
# Ensure that boost is removed after slot is over
time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
spec.config.SECONDS_PER_SLOT)
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
- assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0
+ assert spec.get_weight(store, spec.hash_tree_root(block)) == 0
next_slots(spec, state, 3)
block = build_empty_block_for_next_slot(spec, state)
@@ -747,14 +481,14 @@ def test_proposer_boost(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
yield from add_block(spec, store, signed_block, test_steps)
assert store.proposer_boost_root == spec.hash_tree_root(block)
- assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0
+ assert spec.get_weight(store, spec.hash_tree_root(block)) > 0
# Ensure that boost is removed after slot is over
time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
spec.config.SECONDS_PER_SLOT)
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
- assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0
+ assert spec.get_weight(store, spec.hash_tree_root(block)) == 0
test_steps.append({
'checks': {
@@ -765,7 +499,7 @@ def test_proposer_boost(spec, state):
yield 'steps', test_steps
-@with_all_phases
+@with_altair_and_later
@spec_state_test
def test_proposer_boost_root_same_slot_untimely_block(spec, state):
test_steps = []
@@ -797,3 +531,797 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state):
})
yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justification_withholding(spec, state):
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    for _ in range(2):
+        next_epoch(spec, state)
+
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(state) == 4
+
+    # ------------
+
+    # Create attacker's fork that can justify epoch 4
+    # Do not apply attacker's blocks to store
+    attacker_state = state.copy()
+    attacker_signed_blocks = []
+
+    while not is_ready_to_justify(spec, attacker_state):
+        _, signed_blocks, attacker_state = next_slots_with_attestations(
+            spec, attacker_state, 1, True, False)
+        attacker_signed_blocks += signed_blocks
+
+    assert attacker_state.finalized_checkpoint.epoch == 2
+    assert attacker_state.current_justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(attacker_state) == 4
+
+    # ------------
+
+    # The honest fork sees all except the last block from attacker_signed_blocks
+    # Apply honest fork to store
+    honest_signed_blocks = attacker_signed_blocks[:-1]
+    assert len(honest_signed_blocks) > 0
+
+    for signed_block in honest_signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+
+    last_honest_block = honest_signed_blocks[-1].message
+    honest_state = store.block_states[hash_tree_root(last_honest_block)].copy()
+
+    assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(honest_state) == 4
+
+    # Create & apply an honest block in epoch 5 that can justify epoch 4
+    next_epoch(spec, honest_state)
+    assert spec.get_current_epoch(honest_state) == 5
+
+    honest_block = build_empty_block_for_next_slot(spec, honest_state)
+    honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations
+    signed_block = state_transition_and_sign_block(spec, honest_state, honest_block)
+    yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.get_head(store) == hash_tree_root(honest_block)
+    assert is_ready_to_justify(spec, honest_state)
+
+    # ------------
+
+    # When the attacker's block is received, the honest block is still the head
+    # This relies on the honest block's LMD score increasing due to proposer boost
+    yield from tick_and_add_block(spec, store, attacker_signed_blocks[-1], test_steps)
+    assert store.finalized_checkpoint.epoch == 3
+    assert store.justified_checkpoint.epoch == 4
+    assert spec.get_head(store) == hash_tree_root(honest_block)
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justification_withholding_reverse_order(spec, state):
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    for _ in range(2):
+        next_epoch(spec, state)
+
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(state) == 4
+
+    # ------------
+
+    # Create attacker's fork that can justify epoch 4
+    attacker_state = state.copy()
+    attacker_signed_blocks = []
+
+    while not is_ready_to_justify(spec, attacker_state):
+        _, signed_blocks, attacker_state = next_slots_with_attestations(
+            spec, attacker_state, 1, True, False)
+        assert len(signed_blocks) == 1
+        attacker_signed_blocks += signed_blocks
+        yield from tick_and_add_block(spec, store, signed_blocks[0], test_steps)
+
+    assert attacker_state.finalized_checkpoint.epoch == 2
+    assert attacker_state.current_justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(attacker_state) == 4
+    attackers_head = hash_tree_root(attacker_signed_blocks[-1].message)
+    assert spec.get_head(store) == attackers_head
+
+    # ------------
+
+    # The honest fork sees all except the last block from attacker_signed_blocks
+    honest_signed_blocks = attacker_signed_blocks[:-1]
+    assert len(honest_signed_blocks) > 0
+
+    last_honest_block = honest_signed_blocks[-1].message
+    honest_state = store.block_states[hash_tree_root(last_honest_block)].copy()
+
+    assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.get_current_epoch(honest_state) == 4
+
+    # Create an honest block in epoch 5 that can justify epoch 4
+    next_epoch(spec, honest_state)
+    assert spec.get_current_epoch(honest_state) == 5
+
+    honest_block = build_empty_block_for_next_slot(spec, honest_state)
+    honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations
+    signed_block = state_transition_and_sign_block(spec, honest_state, honest_block)
+    assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert is_ready_to_justify(spec, honest_state)
+
+    # When the honest block is received, the honest block becomes the head
+    # This relies on the honest block's LMD score increasing due to proposer boost
+    yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert store.finalized_checkpoint.epoch == 3
+    assert store.justified_checkpoint.epoch == 4
+    assert spec.get_head(store) == hash_tree_root(honest_block)
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justification_update_beginning_of_epoch(spec, state):
+    """
+    Check that the store's justified checkpoint is updated when blocks containing better justification are
+    revealed at the first slot of an epoch
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    # Build an epoch of blocks whose attestations justify epoch 4, but don't add them to the store yet
+    another_state = state.copy()
+    _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    assert spec.compute_epoch_at_slot(another_state.slot) == 5
+    assert another_state.current_justified_checkpoint.epoch == 4
+
+    # Tick store to the start of the next epoch
+    slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+        assert spec.get_head(store) == signed_block.message.hash_tree_root()
+    assert store.justified_checkpoint.epoch == 4
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justification_update_end_of_epoch(spec, state):
+    """
+    Check that the store's justified checkpoint is updated when blocks containing better justification are
+    revealed at the last slot of an epoch
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    # Build an epoch of blocks whose attestations justify epoch 4, but don't add them to the store yet
+    another_state = state.copy()
+    _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    assert spec.compute_epoch_at_slot(another_state.slot) == 5
+    assert another_state.current_justified_checkpoint.epoch == 4
+
+    # Tick store to the last slot of the next epoch
+    slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+    slot = slot + spec.SLOTS_PER_EPOCH - 1
+    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+        assert spec.get_head(store) == signed_block.message.hash_tree_root()
+    assert store.justified_checkpoint.epoch == 4
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_incompatible_justification_update_start_of_epoch(spec, state):
+    """
+    Check that the store's justified checkpoint is updated when blocks containing better justification are
+    revealed at the first slot of an epoch, even when the new justified checkpoint is not a descendant of
+    the store's justified checkpoint
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+
+    # Copy the state to create a fork later
+    another_state = state.copy()
+
+    # Fill epoch 4 and 5
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+
+    # Build a competing fork whose later blocks justify epoch 6, but don't add them to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+    last_block_root = another_state.latest_block_header.parent_root
+
+    # Tick store forward into epoch 8 (no end-of-epoch offset here, unlike the end-of-epoch variant)
+    slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
+    assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root
+    justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch)
+    assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root
+    assert store.finalized_checkpoint.epoch == 4
+    assert store.justified_checkpoint.epoch == 6
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_incompatible_justification_update_end_of_epoch(spec, state):
+    """
+    Check that the store's justified checkpoint is updated when blocks containing better justification are
+    revealed at the last slot of an epoch, even when the new justified checkpoint is not a descendant of
+    the store's justified checkpoint
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+
+    # Copy the state to create a fork later
+    another_state = state.copy()
+
+    # Fill epoch 4 and 5
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+
+    # Build a competing fork whose later blocks justify epoch 6, but don't add them to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+    last_block_root = another_state.latest_block_header.parent_root
+
+    # Tick store forward into epoch 8, near the end of the epoch
+    slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+    slot = slot + spec.SLOTS_PER_EPOCH - 1
+    current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
+    assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root
+    justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch)
+    assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root
+    assert store.finalized_checkpoint.epoch == 4
+    assert store.justified_checkpoint.epoch == 6
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justified_update_not_realized_finality(spec, state):
+    """
+    Check that the store updates its justified checkpoint if a higher justified checkpoint is found that is
+    a descendant of the finalized checkpoint, even though the fork carrying it has not realized that finality
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    # We'll make the current head block the finalized block
+    finalized_root = spec.get_head(store)
+    finalized_block = store.blocks[finalized_root]
+    assert spec.compute_epoch_at_slot(finalized_block.slot) == 4
+    assert spec.get_head(store) == finalized_root
+    # Copy the post-state to use later
+    another_state = state.copy()
+
+    # Create a fork that finalizes our block
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+    assert state.finalized_checkpoint.root == store.finalized_checkpoint.root == finalized_root
+
+    # Create a fork for a better justification that is a descendant of the finalized block,
+    # but does not realize the finality.
+    # Do not add these blocks to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 4
+    last_block = signed_blocks[-1]
+    last_block_root = last_block.message.hash_tree_root()
+    ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot)
+    assert ancestor_at_finalized_slot == store.finalized_checkpoint.root
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justified_update_monotonic(spec, state):
+    """
+    Check that the store does not update its justified checkpoint with a lower justified checkpoint.
+    This test case checks that the store's justified checkpoint remains the same even when we input a block that has:
+    - a higher finalized checkpoint than the store's finalized checkpoint, and
+    - a lower justified checkpoint than the store's justified checkpoint
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert store.finalized_checkpoint.epoch == 2
+
+    # We'll eventually make the current head block the finalized block
+    finalized_root = spec.get_head(store)
+    finalized_block = store.blocks[finalized_root]
+    assert spec.compute_epoch_at_slot(finalized_block.slot) == 4
+    assert spec.get_head(store) == finalized_root
+    # Copy into another variable so we can use `state` later
+    another_state = state.copy()
+
+    # Create a fork with justification that is a descendant of the finalized block
+    # Do not add these blocks to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 2
+    last_block = signed_blocks[-1]
+    last_block_root = last_block.message.hash_tree_root()
+    ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot)
+    assert ancestor_at_finalized_slot == finalized_root
+
+    # Create a fork with lower justification that also finalizes our chosen block
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert state.current_justified_checkpoint.epoch == 5
+    # Check that store's finalized checkpoint is updated
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+    # Check that store's justified checkpoint is not updated
+    assert store.justified_checkpoint.epoch == 6
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justified_update_always_if_better(spec, state):
+    """
+    Check that the store updates its justified checkpoint with any higher justified checkpoint.
+    This test case checks that the store's justified checkpoint is updated when we input a block that has:
+    - a lower finalized checkpoint than the store's finalized checkpoint, and
+    - a higher justified checkpoint than the store's justified checkpoint
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert store.finalized_checkpoint.epoch == 2
+
+    # We'll eventually make the current head block the finalized block
+    finalized_root = spec.get_head(store)
+    finalized_block = store.blocks[finalized_root]
+    assert spec.compute_epoch_at_slot(finalized_block.slot) == 4
+    assert spec.get_head(store) == finalized_root
+    # Copy into another variable to use later
+    another_state = state.copy()
+
+    # Create a fork with lower justification that also finalizes our chosen block
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+
+    # Create a fork with higher justification that is a descendant of the finalized block
+    # Do not add these blocks to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 4
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_pull_up_past_epoch_block(spec, state):
+ """
+    Check that the store pulls up a block from the past epoch to realize its justification & finalization information
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Create a chain within epoch 4 that contains a justification for epoch 4
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 4
+
+ # Tick store to the next epoch
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Add the previously created chain to the store and check for updates
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert store.justified_checkpoint.epoch == 4
+ assert store.finalized_checkpoint.epoch == 3
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_not_pull_up_current_epoch_block(spec, state):
+ """
+ Check that the store does not pull-up a block from the current epoch if the previous epoch is not justified
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Skip to the next epoch
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(state.slot) == 5
+
+ # Create a chain within epoch 5 that contains a justification for epoch 5
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5
+
+ # Add the previously created chain to the store and check that store does not apply pull-up updates
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_pull_up_on_tick(spec, state):
+ """
+ Check that the store pulls-up current epoch tips on the on_tick transition to the next epoch
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Skip to the next epoch
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(state.slot) == 5
+
+ # Create a chain within epoch 5 that contains a justification for epoch 5
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5
+
+ # Add the previously created chain to the store and check that store does not apply pull-up updates,
+ # since the previous epoch was not justified
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert store.justified_checkpoint.epoch == 3
+ assert store.finalized_checkpoint.epoch == 2
+
+ # Now tick the store to the next epoch and check that pull-up tip updates were applied
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(state.slot) == 6
+ assert store.justified_checkpoint.epoch == 5
+ # There's no new finality, so no finality updates expected
+ assert store.finalized_checkpoint.epoch == 3
+
+ yield 'steps', test_steps
diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py
new file mode 100644
index 0000000000..afff8d4f46
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py
@@ -0,0 +1,498 @@
+from eth2spec.test.context import (
+ spec_state_test,
+ with_altair_and_later,
+ with_presets,
+)
+from eth2spec.test.helpers.constants import (
+ MINIMAL,
+)
+from eth2spec.test.helpers.attestations import (
+ state_transition_with_full_block,
+ get_valid_attestation,
+ get_valid_attestation_at_slot,
+)
+from eth2spec.test.helpers.block import (
+ build_empty_block,
+ build_empty_block_for_next_slot,
+)
+from eth2spec.test.helpers.fork_choice import (
+ get_genesis_forkchoice_store_and_block,
+ on_tick_and_append_step,
+ add_attestations,
+ tick_and_add_block,
+ apply_next_epoch_with_attestations,
+ find_next_justifying_slot,
+ is_ready_to_justify,
+)
+from eth2spec.test.helpers.state import (
+ state_transition_and_sign_block,
+ next_epoch,
+ next_slot,
+ transition_to,
+)
+
+
+TESTING_PRESETS = [MINIMAL]
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state):
+ """
+ [Case 1]
+
+ { epoch 4 }{ epoch 5 }
+ [c4]<--[a]<--[-]<--[y]
+ ↑____[-]<--[z]
+
+ At c4, c3 is the latest justified checkpoint (or something earlier)
+
+ The block y doesn't have enough votes to justify c4.
+ The block z also doesn't have enough votes to justify c4.
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # create block_a, it needs 2 more full blocks to justify epoch 4
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
+ for signed_block in signed_blocks[:-2]:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ state = store.block_states[spec.get_head(store)].copy()
+ assert state.current_justified_checkpoint.epoch == 3
+ next_slot(spec, state)
+ state_a = state.copy()
+
+ # to test the "no withholding" situation, temporarily store the blocks in lists
+ signed_blocks_of_y = []
+ signed_blocks_of_z = []
+
+ # add an empty block on chain y
+ block_y = build_empty_block_for_next_slot(spec, state)
+ signed_block_y = state_transition_and_sign_block(spec, state, block_y)
+ signed_blocks_of_y.append(signed_block_y)
+
+ # chain y has some on-chain attestations, but not enough to justify c4
+ signed_block_y = state_transition_with_full_block(spec, state, True, True)
+ assert not is_ready_to_justify(spec, state)
+ signed_blocks_of_y.append(signed_block_y)
+ assert store.justified_checkpoint.epoch == 3
+
+ state = state_a.copy()
+ signed_block_z = None
+ # add one block on chain z, which is not enough to justify c4
+ attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
+ block_z = build_empty_block_for_next_slot(spec, state)
+ block_z.body.attestations = [attestation]
+ signed_block_z = state_transition_and_sign_block(spec, state, block_z)
+ signed_blocks_of_z.append(signed_block_z)
+
+ # add an empty block on chain z
+ block_z = build_empty_block_for_next_slot(spec, state)
+ signed_block_z = state_transition_and_sign_block(spec, state, block_z)
+ signed_blocks_of_z.append(signed_block_z)
+
+ # ensure z couldn't justify c4
+ assert not is_ready_to_justify(spec, state)
+
+ # apply blocks to store
+ # (i) slot block_a.slot + 1
+ signed_block_y = signed_blocks_of_y.pop(0)
+ yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
+ # apply block of chain `z`
+ signed_block_z = signed_blocks_of_z.pop(0)
+ yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
+
+ # (ii) slot block_a.slot + 2
+ # apply block of chain `z`
+ signed_block_z = signed_blocks_of_z.pop(0)
+ yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
+ # apply block of chain `y`
+ signed_block_y = signed_blocks_of_y.pop(0)
+ yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
+ # chain `y` remains the winner since it arrives earlier than `z`
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+ assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+
+ # tick to the prior of the epoch boundary
+ slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
+ current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    # chain `y` remains the winner
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+
+ # to next block
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ yield 'steps', test_steps
+
+
+def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previous_epoch):
+    """Run a scenario where justification of a checkpoint is delayed until the
+    epoch boundary, optionally attempting a reorg with a competing chain z."""
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 2
+ for _ in range(2):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ if is_justifying_previous_epoch:
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, False, False, test_steps=test_steps)
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
+ else:
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ if is_justifying_previous_epoch:
+ # try to find the block that can justify epoch 3
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True)
+ else:
+ # try to find the block that can justify epoch 4
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
+ for signed_block in signed_blocks:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+        assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ state = store.block_states[spec.get_head(store)].copy()
+ if is_justifying_previous_epoch:
+ assert state.current_justified_checkpoint.epoch == 2
+ else:
+ assert state.current_justified_checkpoint.epoch == 3
+
+ assert is_ready_to_justify(spec, state)
+ state_b = state.copy()
+
+ # add chain y
+ if is_justifying_previous_epoch:
+ signed_block_y = state_transition_with_full_block(spec, state, False, True)
+ else:
+ signed_block_y = state_transition_with_full_block(spec, state, True, True)
+ yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+ if is_justifying_previous_epoch:
+ assert store.justified_checkpoint.epoch == 2
+ else:
+ assert store.justified_checkpoint.epoch == 3
+
+ # add attestations of y
+ temp_state = state.copy()
+ next_slot(spec, temp_state)
+ attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot))
+ current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ yield from add_attestations(spec, store, attestations_for_y, test_steps)
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+
+ if attemped_reorg:
+ # add chain z
+ state = state_b.copy()
+ slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
+ transition_to(spec, state, slot)
+ block_z = build_empty_block_for_next_slot(spec, state)
+ assert spec.compute_epoch_at_slot(block_z.slot) == 5
+ signed_block_z = state_transition_and_sign_block(spec, state, block_z)
+ yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
+ else:
+ # next epoch
+ state = state_b.copy()
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+
+ # no reorg
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+ if is_justifying_previous_epoch:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ else:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state):
+ """
+ [Case 2]
+
+ { epoch 4 }{ epoch 5 }
+ [c4]<--[b]<--[y]
+ ↑______________[z]
+ At c4, c3 is the latest justified checkpoint (or something earlier)
+
+ block_b: the block that can justify c4.
+ z: the child of block of x at the first slot of epoch 5.
+ block z can reorg the chain from block y.
+ """
+ yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=False)
+
+
+def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch):
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 2
+ for _ in range(2):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+ if is_justifying_previous_epoch:
+ block_a = build_empty_block_for_next_slot(spec, state)
+ signed_block_a = state_transition_and_sign_block(spec, state, block_a)
+ yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
+ else:
+ # fill one more epoch
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+ signed_block_a = state_transition_with_full_block(spec, state, True, True)
+ yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    assert spec.get_head(store) == signed_block_a.message.hash_tree_root()
+
+ state = store.block_states[spec.get_head(store)].copy()
+ if is_justifying_previous_epoch:
+ assert state.current_justified_checkpoint.epoch == 2
+ else:
+ assert state.current_justified_checkpoint.epoch == 3
+ state_a = state.copy()
+
+ if is_justifying_previous_epoch:
+ # try to find the block that can justify epoch 3
+ _, justifying_slot = find_next_justifying_slot(spec, state, False, True)
+ else:
+ # try to find the block that can justify epoch 4
+ _, justifying_slot = find_next_justifying_slot(spec, state, True, True)
+
+ last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1
+ last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1
+
+ # to test the "no withholding" situation, temporarily store the blocks in lists
+ signed_blocks_of_y = []
+
+ # build an empty chain to the slot prior epoch boundary
+ signed_blocks_of_empty_chain = []
+ states_of_empty_chain = []
+
+ for slot in range(state.slot + 1, last_slot_of_y + 1):
+ block = build_empty_block(spec, state, slot=slot)
+ signed_block = state_transition_and_sign_block(spec, state, block)
+ signed_blocks_of_empty_chain.append(signed_block)
+ states_of_empty_chain.append(state.copy())
+ signed_blocks_of_y.append(signed_block)
+
+ signed_block_y = signed_blocks_of_empty_chain[-1]
+
+ # create 2/3 votes for the empty chain
+ attestations_for_y = []
+ # target_is_current = not is_justifying_previous_epoch
+ attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot))
+ attestations_for_y.append(attestations)
+ for state in states_of_empty_chain:
+ attestations = list(get_valid_attestation_at_slot(state, spec, state.slot))
+ attestations_for_y.append(attestations)
+
+ state = state_a.copy()
+ signed_block_z = None
+
+ for slot in range(state_a.slot + 1, last_slot_of_z + 1):
+ # apply chain y, the empty chain
+ if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0:
+ signed_block_y = signed_blocks_of_y.pop(0)
+ assert signed_block_y.message.slot == slot
+ yield from tick_and_add_block(spec, store, signed_block_y, test_steps)
+
+ # apply chain z, a fork chain that includes these attestations_for_y
+ block = build_empty_block(spec, state, slot=slot)
+ if (
+ len(attestations_for_y) > 0 and (
+ (not is_justifying_previous_epoch)
+ or (is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5)
+ )
+ ):
+ block.body.attestations = attestations_for_y.pop(0)
+ signed_block_z = state_transition_and_sign_block(spec, state, block)
+ if signed_block_y != signed_block_z:
+ yield from tick_and_add_block(spec, store, signed_block_z, test_steps)
+ if is_ready_to_justify(spec, state):
+ break
+
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+
+ if is_justifying_previous_epoch:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
+ else:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ if enough_ffg:
+ assert is_ready_to_justify(spec, state)
+ else:
+ assert not is_ready_to_justify(spec, state)
+
+ # to next epoch
+ next_epoch(spec, state)
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+
+ if enough_ffg:
+ # reorg
+ assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
+ if is_justifying_previous_epoch:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+ else:
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
+ else:
+ # no reorg
+ assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state):
+ """
+ [Case 3]
+ """
+ yield from _run_include_votes_of_another_empty_chain(
+ spec, state, enough_ffg=True, is_justifying_previous_epoch=False)
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state):
+ """
+ [Case 4]
+ """
+ yield from _run_include_votes_of_another_empty_chain(
+ spec, state, enough_ffg=False, is_justifying_previous_epoch=False)
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_delayed_justification_current_epoch(spec, state):
+ """
+ [Case 5]
+
+ To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
+ this is the basic case if there is no chain z
+
+ { epoch 4 }{ epoch 5 }
+ [c4]<--[b]<--[y]
+
+ At c4, c3 is the latest justified checkpoint.
+
+ block_b: the block that can justify c4.
+ """
+ yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False)
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_delayed_justification_previous_epoch(spec, state):
+ """
+ [Case 6]
+
+ Similar to ``test_delayed_justification_current_epoch``,
+ but includes attestations during epoch N to justify checkpoint N-1.
+
+ { epoch 3 }{ epoch 4 }{ epoch 5 }
+ [c3]<---------------[c4]---[b]<---------------------------------[y]
+
+ """
+ yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True)
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state):
+ """
+ [Case 7]
+
+ Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``,
+ but includes attestations during epoch N to justify checkpoint N-1.
+
+ { epoch 3 }{ epoch 4 }{ epoch 5 }
+ [c3]<---------------[c4]<--[b]<--[y]
+ ↑______________[z]
+
+ At c4, c2 is the latest justified checkpoint.
+
+ block_b: the block that can justify c3.
+    z: the child of block b at the first slot of epoch 5.
+ block z can reorg the chain from block y.
+ """
+ yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True)
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state):
+ """
+ [Case 8]
+
+ Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``,
+ but includes attestations during epoch N to justify checkpoint N-1.
+
+ """
+ yield from _run_include_votes_of_another_empty_chain(
+ spec, state, enough_ffg=True, is_justifying_previous_epoch=True)
diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py
new file mode 100644
index 0000000000..61926875ad
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py
@@ -0,0 +1,205 @@
+from eth2spec.test.context import (
+ spec_state_test,
+ with_altair_and_later,
+ with_presets,
+)
+from eth2spec.test.helpers.constants import (
+ MINIMAL,
+)
+from eth2spec.test.helpers.attestations import (
+ state_transition_with_full_block,
+)
+from eth2spec.test.helpers.block import (
+ build_empty_block_for_next_slot,
+)
+from eth2spec.test.helpers.fork_choice import (
+ get_genesis_forkchoice_store_and_block,
+ on_tick_and_append_step,
+ tick_and_add_block,
+ apply_next_epoch_with_attestations,
+ find_next_justifying_slot,
+)
+from eth2spec.test.helpers.state import (
+ state_transition_and_sign_block,
+ next_epoch,
+)
+
+
+TESTING_PRESETS = [MINIMAL]
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_withholding_attack(spec, state):
+ """
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Create the attack block that includes justifying attestations for epoch 4
+ # This block is withheld & revealed only in epoch 5
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
+ assert len(signed_blocks) > 1
+ signed_attack_block = signed_blocks[-1]
+ for signed_block in signed_blocks[:-1]:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
+ state = store.block_states[spec.get_head(store)].copy()
+ assert spec.compute_epoch_at_slot(state.slot) == 4
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Create an honest chain in epoch 5 that includes the justifying attestations from the attack block
+ next_epoch(spec, state)
+ assert spec.compute_epoch_at_slot(state.slot) == 5
+ assert state.current_justified_checkpoint.epoch == 3
+    # Create two blocks in the honest chain with full attestations, and add to the store
+ for _ in range(2):
+ signed_block = state_transition_with_full_block(spec, state, True, False)
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ # Create final block in the honest chain that includes the justifying attestations from the attack block
+ honest_block = build_empty_block_for_next_slot(spec, state)
+ honest_block.body.attestations = signed_attack_block.message.body.attestations
+ signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
+ # Add the honest block to the store
+ yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
+ assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Tick to the next slot so proposer boost is not a factor in choosing the head
+ current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Upon revealing the withheld attack block, the honest block should still be the head
+ yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
+ assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
+ # As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated
+ assert store.justified_checkpoint.epoch == 4
+
+ # Even after going to the next epoch, the honest block should remain the head
+ slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+ current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+ assert spec.get_head(store) == signed_honest_block.message.hash_tree_root()
+
+ yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets(TESTING_PRESETS, reason="too slow")
+def test_withholding_attack_unviable_honest_chain(spec, state):
+ """
+ Checks that the withholding attack succeeds for one epoch if the honest chain has a voting source beyond
+ two epochs ago.
+ """
+ test_steps = []
+ # Initialization
+ store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+ yield 'anchor_state', state
+ yield 'anchor_block', anchor_block
+ current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert store.time == current_time
+
+ next_epoch(spec, state)
+ on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+ # Fill epoch 1 to 3
+ for _ in range(3):
+ state, store, _ = yield from apply_next_epoch_with_attestations(
+ spec, state, store, True, True, test_steps=test_steps)
+
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ next_epoch(spec, state)
+ assert spec.compute_epoch_at_slot(state.slot) == 5
+
+ # Create the attack block that includes justifying attestations for epoch 5
+ # This block is withheld & revealed only in epoch 6
+ signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False)
+ assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state)
+ assert len(signed_blocks) > 1
+ signed_attack_block = signed_blocks[-1]
+ for signed_block in signed_blocks[:-1]:
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ assert spec.get_head(store) == signed_block.message.hash_tree_root()
+ assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root()
+ state = store.block_states[spec.get_head(store)].copy()
+ assert spec.compute_epoch_at_slot(state.slot) == 5
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Create an honest chain in epoch 6 that includes the justifying attestations from the attack block
+ next_epoch(spec, state)
+ assert spec.compute_epoch_at_slot(state.slot) == 6
+ assert state.current_justified_checkpoint.epoch == 3
+    # Create two blocks in the honest chain with full attestations, and add to the store
+ for _ in range(2):
+ signed_block = state_transition_with_full_block(spec, state, True, False)
+ assert state.current_justified_checkpoint.epoch == 3
+ yield from tick_and_add_block(spec, store, signed_block, test_steps)
+ # Create final block in the honest chain that includes the justifying attestations from the attack block
+ honest_block = build_empty_block_for_next_slot(spec, state)
+ honest_block.body.attestations = signed_attack_block.message.body.attestations
+ signed_honest_block = state_transition_and_sign_block(spec, state, honest_block)
+ honest_block_root = signed_honest_block.message.hash_tree_root()
+ assert state.current_justified_checkpoint.epoch == 3
+ # Add the honest block to the store
+ yield from tick_and_add_block(spec, store, signed_honest_block, test_steps)
+ current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store))
+ assert current_epoch == 6
+ # assert store.voting_source[honest_block_root].epoch == 3
+ assert spec.get_head(store) == honest_block_root
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Tick to the next slot so proposer boost is not a factor in choosing the head
+ current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.get_head(store) == honest_block_root
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6
+ assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+ # Upon revealing the withheld attack block, it should become the head
+ yield from tick_and_add_block(spec, store, signed_attack_block, test_steps)
+ # The attack block is pulled up and store.justified_checkpoint is updated
+ assert store.justified_checkpoint.epoch == 5
+ attack_block_root = signed_attack_block.message.hash_tree_root()
+ assert spec.get_head(store) == attack_block_root
+
+ # After going to the next epoch, the honest block should become the head
+ slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+ current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+ on_tick_and_append_step(spec, store, current_time, test_steps)
+ assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+ # assert store.voting_source[honest_block_root].epoch == 5
+ assert spec.get_head(store) == honest_block_root
+
+ yield 'steps', test_steps
diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py
deleted file mode 100644
index 92382c884b..0000000000
--- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from copy import deepcopy
-
-from eth2spec.utils.ssz.ssz_impl import hash_tree_root
-from eth2spec.test.context import (
- spec_state_test,
- with_all_phases,
-)
-from eth2spec.test.helpers.block import (
- build_empty_block_for_next_slot,
-)
-from eth2spec.test.helpers.fork_choice import (
- get_genesis_forkchoice_store,
- run_on_block,
- apply_next_epoch_with_attestations,
-)
-from eth2spec.test.helpers.state import (
- next_epoch,
- state_transition_and_sign_block,
-)
-
-
-@with_all_phases
-@spec_state_test
-def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
- """
- NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint
- """
- # Initialization
- store = get_genesis_forkchoice_store(spec, state)
-
- next_epoch(spec, state)
- spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
- state, store, last_signed_block = yield from apply_next_epoch_with_attestations(
- spec, state, store, True, False)
- last_block_root = hash_tree_root(last_signed_block.message)
-
- # NOTE: Mock fictitious justified checkpoint in store
- store.justified_checkpoint = spec.Checkpoint(
- epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
- root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
- )
-
- next_epoch(spec, state)
- spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
-
- # Create new higher justified checkpoint not in branch of store's justified checkpoint
- just_block = build_empty_block_for_next_slot(spec, state)
- store.blocks[just_block.hash_tree_root()] = just_block
-
- # Step time past safe slots
- spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT)
- assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
-
- previously_finalized = store.finalized_checkpoint
- previously_justified = store.justified_checkpoint
-
- # Add a series of new blocks with "better" justifications
- best_justified_checkpoint = spec.Checkpoint(epoch=0)
- for i in range(3, 0, -1):
- # Mutate store
- just_state = store.block_states[last_block_root]
- new_justified = spec.Checkpoint(
- epoch=previously_justified.epoch + i,
- root=just_block.hash_tree_root(),
- )
- if new_justified.epoch > best_justified_checkpoint.epoch:
- best_justified_checkpoint = new_justified
-
- just_state.current_justified_checkpoint = new_justified
-
- block = build_empty_block_for_next_slot(spec, just_state)
- signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block)
-
- # NOTE: Mock store so that the modified state could be accessed
- parent_block = store.blocks[last_block_root].copy()
- parent_block.state_root = just_state.hash_tree_root()
- store.blocks[block.parent_root] = parent_block
- store.block_states[block.parent_root] = just_state.copy()
- assert block.parent_root in store.blocks.keys()
- assert block.parent_root in store.block_states.keys()
-
- run_on_block(spec, store, signed_block)
-
- assert store.finalized_checkpoint == previously_finalized
- assert store.justified_checkpoint == previously_justified
- # ensure the best from the series was stored
- assert store.best_justified_checkpoint == best_justified_checkpoint
diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py
index 0d9f6ddf54..33d1bbac44 100644
--- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py
+++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py
@@ -18,7 +18,6 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False):
assert store.time == time
if new_justified_checkpoint:
- assert store.justified_checkpoint == store.best_justified_checkpoint
assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch
assert store.justified_checkpoint.root != previous_justified_checkpoint.root
else:
@@ -32,12 +31,12 @@ def test_basic(spec, state):
run_on_tick(spec, store, store.time + 1)
+"""
@with_all_phases
@spec_state_test
def test_update_justified_single_on_store_finalized_chain(spec, state):
store = get_genesis_forkchoice_store(spec, state)
- # [Mock store.best_justified_checkpoint]
# Create a block at epoch 1
next_epoch(spec, state)
block = build_empty_block_for_next_slot(spec, state)
@@ -58,8 +57,6 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
state_transition_and_sign_block(spec, state, block)
store.blocks[block.hash_tree_root()] = block
store.block_states[block.hash_tree_root()] = state
- # Mock store.best_justified_checkpoint
- store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
run_on_tick(
spec,
@@ -67,6 +64,7 @@ def test_update_justified_single_on_store_finalized_chain(spec, state):
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
new_justified_checkpoint=True
)
+"""
@with_all_phases
@@ -89,7 +87,6 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
root=block.hash_tree_root(),
)
- # [Mock store.best_justified_checkpoint]
# Create a block at epoch 1
state = init_state.copy()
next_epoch(spec, state)
@@ -112,79 +109,9 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state):
state_transition_and_sign_block(spec, state, block)
store.blocks[block.hash_tree_root()] = block.copy()
store.block_states[block.hash_tree_root()] = state.copy()
- # Mock store.best_justified_checkpoint
- store.best_justified_checkpoint = state.current_justified_checkpoint.copy()
run_on_tick(
spec,
store,
store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT,
)
-
-
-@with_all_phases
-@spec_state_test
-def test_no_update_same_slot_at_epoch_boundary(spec, state):
- store = get_genesis_forkchoice_store(spec, state)
- seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
-
- store.best_justified_checkpoint = spec.Checkpoint(
- epoch=store.justified_checkpoint.epoch + 1,
- root=b'\x55' * 32,
- )
-
- # set store time to already be at epoch boundary
- store.time = seconds_per_epoch
-
- run_on_tick(spec, store, store.time + 1)
-
-
-@with_all_phases
-@spec_state_test
-def test_no_update_not_epoch_boundary(spec, state):
- store = get_genesis_forkchoice_store(spec, state)
-
- store.best_justified_checkpoint = spec.Checkpoint(
- epoch=store.justified_checkpoint.epoch + 1,
- root=b'\x55' * 32,
- )
-
- run_on_tick(spec, store, store.time + spec.config.SECONDS_PER_SLOT)
-
-
-@with_all_phases
-@spec_state_test
-def test_no_update_new_justified_equal_epoch(spec, state):
- store = get_genesis_forkchoice_store(spec, state)
- seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
-
- store.best_justified_checkpoint = spec.Checkpoint(
- epoch=store.justified_checkpoint.epoch + 1,
- root=b'\x55' * 32,
- )
-
- store.justified_checkpoint = spec.Checkpoint(
- epoch=store.best_justified_checkpoint.epoch,
- root=b'\44' * 32,
- )
-
- run_on_tick(spec, store, store.time + seconds_per_epoch)
-
-
-@with_all_phases
-@spec_state_test
-def test_no_update_new_justified_later_epoch(spec, state):
- store = get_genesis_forkchoice_store(spec, state)
- seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
-
- store.best_justified_checkpoint = spec.Checkpoint(
- epoch=store.justified_checkpoint.epoch + 1,
- root=b'\x55' * 32,
- )
-
- store.justified_checkpoint = spec.Checkpoint(
- epoch=store.best_justified_checkpoint.epoch + 1,
- root=b'\44' * 32,
- )
-
- run_on_tick(spec, store, store.time + seconds_per_epoch)
diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
index 35ddbc330a..c164515103 100644
--- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
+++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
@@ -235,7 +235,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random(
def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)):
block = random_block_capella(spec, state, signed_blocks, scenario_state)
# TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
- opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1)
+ opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1)
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
block.body.blob_kzg_commitments = blob_kzg_commitments
diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py
index aa060f4f9a..7dd9597ebe 100644
--- a/tests/core/pyspec/eth2spec/utils/bls.py
+++ b/tests/core/pyspec/eth2spec/utils/bls.py
@@ -1,28 +1,50 @@
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
from py_ecc.optimized_bls12_381 import ( # noqa: F401
- G1,
- G2,
- Z1,
- Z2,
- FQ,
- add,
- multiply,
- neg,
- pairing,
- final_exponentiate,
- FQ12
+ G1 as py_ecc_G1,
+ G2 as py_ecc_G2,
+ Z1 as py_ecc_Z1,
+ add as py_ecc_add,
+ multiply as py_ecc_mul,
+ neg as py_ecc_neg,
+ pairing as py_ecc_pairing,
+ final_exponentiate as py_ecc_final_exponentiate,
+ FQ12 as py_ecc_GT,
)
from py_ecc.bls.g2_primitives import ( # noqa: F401
- G1_to_pubkey as G1_to_bytes48,
- pubkey_to_G1 as bytes48_to_G1,
- G2_to_signature as G2_to_bytes96,
- signature_to_G2 as bytes96_to_G2,
+ curve_order as BLS_MODULUS,
+ G1_to_pubkey as py_ecc_G1_to_bytes48,
+ pubkey_to_G1 as py_ecc_bytes48_to_G1,
+ G2_to_signature as py_ecc_G2_to_bytes96,
+ signature_to_G2 as py_ecc_bytes96_to_G2,
+)
+from py_arkworks_bls12381 import (
+ G1Point as arkworks_G1,
+ G2Point as arkworks_G2,
+ Scalar as arkworks_Scalar,
+ GT as arkworks_GT,
)
import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option
+import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option
+
+
+class fastest_bls:
+ G1 = arkworks_G1
+ G2 = arkworks_G2
+ Scalar = arkworks_Scalar
+ GT = arkworks_GT
+ _AggregatePKs = milagro_bls._AggregatePKs
+ Sign = milagro_bls.Sign
+ Verify = milagro_bls.Verify
+ Aggregate = milagro_bls.Aggregate
+ AggregateVerify = milagro_bls.AggregateVerify
+ FastAggregateVerify = milagro_bls.FastAggregateVerify
+ SkToPk = milagro_bls.SkToPk
+
+
# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing.
bls_active = True
@@ -43,6 +65,14 @@ def use_milagro():
bls = milagro_bls
+def use_arkworks():
+ """
+ Shortcut to use Arkworks as BLS library
+ """
+ global bls
+ bls = arkworks_bls
+
+
def use_py_ecc():
"""
Shortcut to use Py-ecc as BLS library
@@ -51,6 +81,14 @@ def use_py_ecc():
bls = py_ecc_bls
+def use_fastest():
+ """
+ Shortcut to use Milagro for signatures and Arkworks for other BLS operations
+ """
+ global bls
+ bls = fastest_bls
+
+
def only_with_bls(alt_return=None):
"""
Decorator factory to make a function only run when BLS is active. Otherwise return the default.
@@ -68,7 +106,10 @@ def entry(*args, **kw):
@only_with_bls(alt_return=True)
def Verify(PK, message, signature):
try:
- result = bls.Verify(PK, message, signature)
+ if bls == arkworks_bls: # no signature API in arkworks
+ result = py_ecc_bls.Verify(PK, message, signature)
+ else:
+ result = bls.Verify(PK, message, signature)
except Exception:
result = False
finally:
@@ -78,7 +119,10 @@ def Verify(PK, message, signature):
@only_with_bls(alt_return=True)
def AggregateVerify(pubkeys, messages, signature):
try:
- result = bls.AggregateVerify(list(pubkeys), list(messages), signature)
+ if bls == arkworks_bls: # no signature API in arkworks
+ result = py_ecc_bls.AggregateVerify(list(pubkeys), list(messages), signature)
+ else:
+ result = bls.AggregateVerify(list(pubkeys), list(messages), signature)
except Exception:
result = False
finally:
@@ -88,7 +132,10 @@ def AggregateVerify(pubkeys, messages, signature):
@only_with_bls(alt_return=True)
def FastAggregateVerify(pubkeys, message, signature):
try:
- result = bls.FastAggregateVerify(list(pubkeys), message, signature)
+ if bls == arkworks_bls: # no signature API in arkworks
+ result = py_ecc_bls.FastAggregateVerify(list(pubkeys), message, signature)
+ else:
+ result = bls.FastAggregateVerify(list(pubkeys), message, signature)
except Exception:
result = False
finally:
@@ -97,12 +144,16 @@ def FastAggregateVerify(pubkeys, message, signature):
@only_with_bls(alt_return=STUB_SIGNATURE)
def Aggregate(signatures):
+ if bls == arkworks_bls: # no signature API in arkworks
+ return py_ecc_bls.Aggregate(signatures)
return bls.Aggregate(signatures)
@only_with_bls(alt_return=STUB_SIGNATURE)
def Sign(SK, message):
- if bls == py_ecc_bls:
+ if bls == arkworks_bls: # no signature API in arkworks
+ return py_ecc_bls.Sign(SK, message)
+ elif bls == py_ecc_bls:
return bls.Sign(SK, message)
else:
return bls.Sign(SK.to_bytes(32, 'big'), message)
@@ -121,24 +172,143 @@ def AggregatePKs(pubkeys):
# milagro_bls._AggregatePKs checks KeyValidate internally
pass
+ if bls == arkworks_bls: # no signature API in arkworks
+ return py_ecc_bls._AggregatePKs(list(pubkeys))
+
return bls._AggregatePKs(list(pubkeys))
@only_with_bls(alt_return=STUB_SIGNATURE)
def SkToPk(SK):
- if bls == py_ecc_bls:
- return bls.SkToPk(SK)
+ if bls == py_ecc_bls or bls == arkworks_bls: # no signature API in arkworks
+ return py_ecc_bls.SkToPk(SK)
else:
return bls.SkToPk(SK.to_bytes(32, 'big'))
def pairing_check(values):
- p_q_1, p_q_2 = values
- final_exponentiation = final_exponentiate(
- pairing(p_q_1[1], p_q_1[0], final_exponentiate=False)
- * pairing(p_q_2[1], p_q_2[0], final_exponentiate=False)
- )
- return final_exponentiation == FQ12.one()
+ if bls == arkworks_bls or bls == fastest_bls:
+ p_q_1, p_q_2 = values
+ g1s = [p_q_1[0], p_q_2[0]]
+ g2s = [p_q_1[1], p_q_2[1]]
+ return arkworks_GT.multi_pairing(g1s, g2s) == arkworks_GT.one()
+ else:
+ p_q_1, p_q_2 = values
+ final_exponentiation = py_ecc_final_exponentiate(
+ py_ecc_pairing(p_q_1[1], p_q_1[0], final_exponentiate=False)
+ * py_ecc_pairing(p_q_2[1], p_q_2[0], final_exponentiate=False)
+ )
+ return final_exponentiation == py_ecc_GT.one()
+
+
+def add(lhs, rhs):
+ """
+ Performs point addition of `lhs` and `rhs`.
+ The points can either be in G1 or G2.
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return lhs + rhs
+ return py_ecc_add(lhs, rhs)
+
+
+def multiply(point, scalar):
+ """
+ Performs Scalar multiplication between
+ `point` and `scalar`.
+ `point` can either be in G1 or G2
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ int_as_bytes = scalar.to_bytes(32, 'little')
+ scalar = arkworks_Scalar.from_le_bytes(int_as_bytes)
+ return point * scalar
+ return py_ecc_mul(point, scalar)
+
+
+def neg(point):
+ """
+ Returns the point negation of `point`
+ `point` can either be in G1 or G2
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return -point
+ return py_ecc_neg(point)
+
+
+def Z1():
+ """
+ Returns the identity point in G1
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return arkworks_G1.identity()
+ return py_ecc_Z1
+
+
+def G1():
+ """
+ Returns the chosen generator point in G1
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return arkworks_G1()
+ return py_ecc_G1
+
+
+def G2():
+ """
+ Returns the chosen generator point in G2
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return arkworks_G2()
+ return py_ecc_G2
+
+
+def G1_to_bytes48(point):
+ """
+ Serializes a point in G1.
+ Returns a bytearray of size 48 as
+ we use the compressed format
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return bytes(point.to_compressed_bytes())
+ return py_ecc_G1_to_bytes48(point)
+
+
+def G2_to_bytes96(point):
+ """
+ Serializes a point in G2.
+ Returns a bytearray of size 96 as
+ we use the compressed format
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return bytes(point.to_compressed_bytes())
+ return py_ecc_G2_to_bytes96(point)
+
+
+def bytes48_to_G1(bytes48):
+ """
+ Deserializes a purported compressed serialized
+ point in G1.
+ - No subgroup checks are performed
+ - If the bytearray is not a valid serialization
+ of a point in G1, then this method will raise
+ an exception
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return arkworks_G1.from_compressed_bytes_unchecked(bytes48)
+ return py_ecc_bytes48_to_G1(bytes48)
+
+
+def bytes96_to_G2(bytes96):
+ """
+ Deserializes a purported compressed serialized
+ point in G2.
+ - No subgroup checks are performed
+ - If the bytearray is not a valid serialization
+ of a point in G2, then this method will raise
+ an exception
+ """
+ if bls == arkworks_bls or bls == fastest_bls:
+ return arkworks_G2.from_compressed_bytes_unchecked(bytes96)
+ return py_ecc_bytes96_to_G2(bytes96)
@only_with_bls(alt_return=True)
diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md
index f79d436eb7..c94b959338 100644
--- a/tests/formats/fork_choice/README.md
+++ b/tests/formats/fork_choice/README.md
@@ -146,10 +146,6 @@ finalized_checkpoint: {
epoch: int, -- Integer value from store.finalized_checkpoint.epoch
root: string, -- Encoded 32-byte value from store.finalized_checkpoint.root
}
-best_justified_checkpoint: {
- epoch: int, -- Integer value from store.best_justified_checkpoint.epoch
- root: string, -- Encoded 32-byte value from store.best_justified_checkpoint.root
-}
proposer_boost_root: string -- Encoded 32-byte value from store.proposer_boost_root
```
@@ -160,7 +156,6 @@ For example:
head: {slot: 32, root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'}
justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
- best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
```
diff --git a/tests/formats/kzg/README.md b/tests/formats/kzg/README.md
new file mode 100644
index 0000000000..b5bd720393
--- /dev/null
+++ b/tests/formats/kzg/README.md
@@ -0,0 +1,15 @@
+# KZG tests
+
+A test type for KZG libraries. Tests all the public interfaces that a KZG library needs to provide to implement EIP-4844, as defined in `polynomial-commitments.md`.
+
+We do not recommend rolling your own crypto or using an untested KZG library.
+
+The KZG test suite runner has the following handlers:
+
+- [`blob_to_kzg_commitment`](./blob_to_kzg_commitment.md)
+- [`compute_kzg_proof`](./compute_kzg_proof.md)
+- [`verify_kzg_proof`](./verify_kzg_proof.md)
+- [`compute_blob_kzg_proof`](./compute_blob_kzg_proof.md)
+- [`verify_blob_kzg_proof`](./verify_blob_kzg_proof.md)
+- [`verify_blob_kzg_proof_batch`](./verify_blob_kzg_proof_batch.md)
+
diff --git a/tests/formats/kzg/blob_to_kzg_commitment.md b/tests/formats/kzg/blob_to_kzg_commitment.md
new file mode 100644
index 0000000000..dbb1556a1d
--- /dev/null
+++ b/tests/formats/kzg/blob_to_kzg_commitment.md
@@ -0,0 +1,21 @@
+# Test format: Blob to KZG commitment
+
+Compute the KZG commitment for a given `blob`.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ blob: Blob -- the data blob
+output: KZGCommitment -- The KZG commitment
+```
+
+- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `blob_to_kzg_commitment` handler should compute the KZG commitment for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg/compute_blob_kzg_proof.md b/tests/formats/kzg/compute_blob_kzg_proof.md
new file mode 100644
index 0000000000..62fce37231
--- /dev/null
+++ b/tests/formats/kzg/compute_blob_kzg_proof.md
@@ -0,0 +1,23 @@
+# Test format: Compute blob KZG proof
+
+Compute the blob KZG proof for a given `blob`, that helps with quickly verifying that the KZG commitment for the blob is correct.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ blob: Blob -- the data blob
+ commitment: Bytes48 -- the commitment to the blob
+output: KZGProof -- The blob KZG proof
+```
+
+- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
+- `commitment` here is encoded as a string: hexadecimal encoding of `48` bytes, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `compute_blob_kzg_proof` handler should compute the blob KZG proof for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg/compute_kzg_proof.md b/tests/formats/kzg/compute_kzg_proof.md
new file mode 100644
index 0000000000..0713d50d81
--- /dev/null
+++ b/tests/formats/kzg/compute_kzg_proof.md
@@ -0,0 +1,24 @@
+# Test format: Compute KZG proof
+
+Compute the KZG proof for a given `blob` and an evaluation point `z`.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ blob: Blob -- the data blob representing a polynomial
+ z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated
+output: Tuple[KZGProof, Bytes32] -- The KZG proof and the value y = f(z)
+```
+
+- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
+- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
+- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `compute_kzg_proof` handler should compute the KZG proof as well as the value `y` for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg/verify_blob_kzg_proof.md b/tests/formats/kzg/verify_blob_kzg_proof.md
new file mode 100644
index 0000000000..dd0bcda5a9
--- /dev/null
+++ b/tests/formats/kzg/verify_blob_kzg_proof.md
@@ -0,0 +1,23 @@
+# Test format: Verify blob KZG proof
+
+Use the blob KZG proof to verify that the KZG commitment for a given `blob` is correct.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ blob: Blob -- the data blob
+ commitment: KZGCommitment -- the KZG commitment to the data blob
+ proof: KZGProof -- The KZG proof
+output: bool -- true (valid proof) or false (incorrect proof)
+```
+
+- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `verify_blob_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `blob` by using the blob KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `blob` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg/verify_blob_kzg_proof_batch.md b/tests/formats/kzg/verify_blob_kzg_proof_batch.md
new file mode 100644
index 0000000000..3bcc74d6bb
--- /dev/null
+++ b/tests/formats/kzg/verify_blob_kzg_proof_batch.md
@@ -0,0 +1,23 @@
+# Test format: Verify blob KZG proof batch
+
+Use the blob KZG proofs to verify that the KZG commitments for the given `blobs` are correct.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ blobs: List[Blob] -- the data blobs
+ commitments: List[KZGCommitment] -- the KZG commitments to the data blobs
+ proofs: List[KZGProof] -- The KZG proofs
+output: bool -- true (all proofs are valid) or false (some proofs incorrect)
+```
+
+- `blobs` here are encoded as strings: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `verify_blob_kzg_proof_batch` handler should verify that `commitments` are correct KZG commitments to `blobs` by using the blob KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or any blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg/verify_kzg_proof.md b/tests/formats/kzg/verify_kzg_proof.md
new file mode 100644
index 0000000000..143466b66f
--- /dev/null
+++ b/tests/formats/kzg/verify_kzg_proof.md
@@ -0,0 +1,25 @@
+# Test format: Verify KZG proof
+
+Verify the KZG proof for a given `blob` and an evaluation point `z` that claims to result in a value of `y`.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+ commitment: KZGCommitment -- the KZG commitment to the data blob
+ z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated
+ y: Bytes32 -- the claimed result of the evaluation
+ proof: KZGProof -- The KZG proof
+output: bool -- true (valid proof) or false (incorrect proof)
+```
+
+- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
+- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `verify_kzg_proof` handler should verify that `proof` is a valid KZG proof that the polynomial committed to by `commitment` evaluates to `y` at `z`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `z` or `y` are not valid BLS field elements, it should error, i.e. the output should be `null`.
diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py
index c106810f8e..4456c2546b 100644
--- a/tests/generators/fork_choice/main.py
+++ b/tests/generators/fork_choice/main.py
@@ -1,15 +1,16 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB
+from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
if __name__ == "__main__":
- phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
+ # Note: Fork choice tests start from Altair - there are no fork choice tests for phase 0 anymore
+ altair_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
'get_head',
'on_block',
'ex_ante',
+ 'reorg',
+ 'withholding',
]}
- # No additional Altair specific finality tests, yet.
- altair_mods = phase_0_mods
# For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several
# PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function
@@ -21,7 +22,6 @@
deneb_mods = capella_mods # No additional Capella specific fork choice tests
all_mods = {
- PHASE0: phase_0_mods,
ALTAIR: altair_mods,
BELLATRIX: bellatrix_mods,
CAPELLA: capella_mods,
diff --git a/tests/generators/kzg_4844/README.md b/tests/generators/kzg_4844/README.md
new file mode 100644
index 0000000000..ab81a85e86
--- /dev/null
+++ b/tests/generators/kzg_4844/README.md
@@ -0,0 +1,3 @@
+# KZG 4844 Test Generator
+
+These tests are specific to the KZG API required for implementing EIP-4844
\ No newline at end of file
diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py
new file mode 100644
index 0000000000..699d1f369a
--- /dev/null
+++ b/tests/generators/kzg_4844/main.py
@@ -0,0 +1,801 @@
+"""
+KZG 4844 test vectors generator
+"""
+
+from hashlib import sha256
+from typing import Tuple, Iterable, Any, Callable, Dict
+
+from eth_utils import (
+ encode_hex,
+ int_to_big_endian,
+)
+
+from eth2spec.utils import bls
+from eth2spec.test.helpers.constants import DENEB
+from eth2spec.test.helpers.typing import SpecForkName
+from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
+from eth2spec.deneb import spec
+
+
+def expect_exception(func, *args):
+ try:
+ func(*args)
+ except Exception:
+ pass
+ else:
+ raise Exception("should have raised exception")
+
+
+def field_element_bytes(x):
+ return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.ENDIANNESS)
+
+
+def field_element_bytes_unchecked(x):
+ return int.to_bytes(x, 32, spec.ENDIANNESS)
+
+
+def encode_hex_list(a):
+ return [encode_hex(x) for x in a]
+
+
+def bls_add_one(x):
+ """
+ Adds "one" (actually bls.G1()) to a compressed group element.
+ Useful to compute definitely incorrect proofs.
+ """
+ return bls.G1_to_bytes48(
+ bls.add(bls.bytes48_to_G1(x), bls.G1())
+ )
+
+
+def evaluate_blob_at(blob, z):
+ return field_element_bytes(
+ spec.evaluate_polynomial_in_evaluation_form(spec.blob_to_polynomial(blob), spec.bytes_to_bls_field(z))
+ )
+
+
+G1 = bls.G1_to_bytes48(bls.G1())
+P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
+ "0123456789abcdef0123456789abcdef0123456789abcdef")
+P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" +
+ "0123456789abcdef0123456789abcdef0123456789abcde0")
+BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS)
+
+BLOB_ALL_ZEROS = spec.Blob()
+BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
+BLOB_RANDOM_VALID2 = spec.Blob(b''.join([field_element_bytes(pow(3, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
+BLOB_RANDOM_VALID3 = spec.Blob(b''.join([field_element_bytes(pow(5, n + 256, spec.BLS_MODULUS)) for n in range(4096)]))
+BLOB_ALL_MODULUS_MINUS_ONE = spec.Blob(b''.join([field_element_bytes(spec.BLS_MODULUS - 1) for n in range(4096)]))
+BLOB_ALMOST_ZERO = spec.Blob(b''.join([field_element_bytes(1 if n == 3211 else 0) for n in range(4096)]))
+BLOB_INVALID = spec.Blob(b'\xFF' * 4096 * 32)
+BLOB_INVALID_CLOSE = spec.Blob(b''.join(
+ [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)]
+))
+BLOB_INVALID_LENGTH_PLUS_ONE = BLOB_RANDOM_VALID1 + b"\x00"
+BLOB_INVALID_LENGTH_MINUS_ONE = BLOB_RANDOM_VALID1[:-1]
+
+VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2,
+ BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO]
+INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE, BLOB_INVALID_LENGTH_PLUS_ONE, BLOB_INVALID_LENGTH_MINUS_ONE]
+
+FE_VALID1 = field_element_bytes(0)
+FE_VALID2 = field_element_bytes(1)
+FE_VALID3 = field_element_bytes(2)
+FE_VALID4 = field_element_bytes(pow(5, 1235, spec.BLS_MODULUS))
+FE_VALID5 = field_element_bytes(spec.BLS_MODULUS - 1)
+FE_VALID6 = field_element_bytes(spec.ROOTS_OF_UNITY[1])
+VALID_FIELD_ELEMENTS = [FE_VALID1, FE_VALID2, FE_VALID3, FE_VALID4, FE_VALID5, FE_VALID6]
+
+FE_INVALID_EQUAL_TO_MODULUS = field_element_bytes_unchecked(spec.BLS_MODULUS)
+FE_INVALID_MODULUS_PLUS_ONE = field_element_bytes_unchecked(spec.BLS_MODULUS + 1)
+FE_INVALID_UINT256_MAX = field_element_bytes_unchecked(2**256 - 1)
+FE_INVALID_UINT256_MID = field_element_bytes_unchecked(2**256 - 2**128)
+FE_INVALID_LENGTH_PLUS_ONE = VALID_FIELD_ELEMENTS[0] + b"\x00"
+FE_INVALID_LENGTH_MINUS_ONE = VALID_FIELD_ELEMENTS[0][:-1]
+INVALID_FIELD_ELEMENTS = [FE_INVALID_EQUAL_TO_MODULUS, FE_INVALID_MODULUS_PLUS_ONE,
+ FE_INVALID_UINT256_MAX, FE_INVALID_UINT256_MID,
+ FE_INVALID_LENGTH_PLUS_ONE, FE_INVALID_LENGTH_MINUS_ONE]
+
+
+def hash(x):
+ return sha256(x).digest()
+
+
+def int_to_hex(n: int, byte_length: int = None) -> str:
+ byte_value = int_to_big_endian(n)
+ if byte_length:
+ byte_value = byte_value.rjust(byte_length, b'\x00')
+ return encode_hex(byte_value)
+
+
+def case01_blob_to_kzg_commitment():
+ # Valid cases
+ for blob in VALID_BLOBS:
+ commitment = spec.blob_to_kzg_commitment(blob)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'blob_to_kzg_commitment_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ },
+ 'output': encode_hex(commitment)
+ }
+
+ # Edge case: Invalid blobs
+ for blob in INVALID_BLOBS:
+ identifier = f'{encode_hex(hash(blob))}'
+ expect_exception(spec.blob_to_kzg_commitment, blob)
+ yield f'blob_to_kzg_commitment_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob)
+ },
+ 'output': None
+ }
+
+
+def case02_compute_kzg_proof():
+ # Valid cases
+ for blob in VALID_BLOBS:
+ for z in VALID_FIELD_ELEMENTS:
+ proof, y = spec.compute_kzg_proof(blob, z)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'z': encode_hex(z),
+ },
+ 'output': (encode_hex(proof), encode_hex(y))
+ }
+
+ # Edge case: Invalid blobs
+ for blob in INVALID_BLOBS:
+ z = VALID_FIELD_ELEMENTS[0]
+ expect_exception(spec.compute_kzg_proof, blob, z)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'z': encode_hex(z),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid z
+ for z in INVALID_FIELD_ELEMENTS:
+ blob = VALID_BLOBS[4]
+ expect_exception(spec.compute_kzg_proof, blob, z)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'z': encode_hex(z),
+ },
+ 'output': None
+ }
+
+
+def case03_verify_kzg_proof():
+ # Valid cases
+ for blob in VALID_BLOBS:
+ for z in VALID_FIELD_ELEMENTS:
+ proof, y = spec.compute_kzg_proof(blob, z)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ assert spec.verify_kzg_proof(commitment, z, y, proof)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': True
+ }
+
+ # Incorrect proofs
+ for blob in VALID_BLOBS:
+ for z in VALID_FIELD_ELEMENTS:
+ proof_orig, y = spec.compute_kzg_proof(blob, z)
+ proof = bls_add_one(proof_orig)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ assert not spec.verify_kzg_proof(commitment, z, y, proof)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': False
+ }
+
+ # Edge case: Invalid z
+ for z in INVALID_FIELD_ELEMENTS:
+ blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1]
+ proof, y = spec.compute_kzg_proof(blob, validz)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}'
+ yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid y
+ for y in INVALID_FIELD_ELEMENTS:
+ blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1]
+ proof, _ = spec.compute_kzg_proof(blob, z)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ identifier = f'{encode_hex(hash(blob))}_{encode_hex(y)}'
+ yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, not in G1
+ blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[0]
+ proof = P1_NOT_IN_G1
+ commitment = spec.blob_to_kzg_commitment(blob)
+ y = VALID_FIELD_ELEMENTS[1]
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_proof_not_in_G1', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, not on curve
+ blob, z = VALID_BLOBS[3], VALID_FIELD_ELEMENTS[1]
+ proof = P1_NOT_ON_CURVE
+ commitment = spec.blob_to_kzg_commitment(blob)
+ y = VALID_FIELD_ELEMENTS[1]
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_proof_not_on_curve', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too few bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ z = VALID_FIELD_ELEMENTS[4]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ proof = proof[:-1]
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_proof_too_few_bytes', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too many bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ z = VALID_FIELD_ELEMENTS[4]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ proof = proof + b"\x00"
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_proof_too_many_bytes', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not in G1
+ blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[3]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ commitment = P1_NOT_IN_G1
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_commitment_not_in_G1', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not on curve
+ blob, z = VALID_BLOBS[1], VALID_FIELD_ELEMENTS[4]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ commitment = P1_NOT_ON_CURVE
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_commitment_not_on_curve', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too few bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)[:-1]
+ z = VALID_FIELD_ELEMENTS[4]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_commitment_too_few_bytes', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too many bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob) + b"\x00"
+ z = VALID_FIELD_ELEMENTS[4]
+ proof, y = spec.compute_kzg_proof(blob, z)
+ expect_exception(spec.verify_kzg_proof, commitment, z, y, proof)
+ yield 'verify_kzg_proof_case_commitment_too_many_bytes', {
+ 'input': {
+ 'commitment': encode_hex(commitment),
+ 'z': encode_hex(z),
+ 'y': encode_hex(y),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+
+def case04_compute_blob_kzg_proof():
+ # Valid cases
+ for blob in VALID_BLOBS:
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ },
+ 'output': encode_hex(proof)
+ }
+
+ # Edge case: Invalid blob
+ for blob in INVALID_BLOBS:
+ commitment = G1
+ expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not in G1
+ commitment = P1_NOT_IN_G1
+ blob = VALID_BLOBS[1]
+ expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield 'compute_blob_kzg_proof_case_invalid_commitment_not_in_G1', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not on curve
+ commitment = P1_NOT_ON_CURVE
+ blob = VALID_BLOBS[1]
+ expect_exception(spec.compute_blob_kzg_proof, blob, commitment)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield 'compute_blob_kzg_proof_case_invalid_commitment_not_on_curve', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ },
+ 'output': None
+ }
+
+
+def case05_verify_blob_kzg_proof():
+ # Valid cases
+ for blob in VALID_BLOBS:
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+ assert spec.verify_blob_kzg_proof(blob, commitment, proof)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': True
+ }
+
+ # Incorrect proofs
+ for blob in VALID_BLOBS:
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = bls_add_one(spec.compute_blob_kzg_proof(blob, commitment))
+ assert not spec.verify_blob_kzg_proof(blob, commitment, proof)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': False
+ }
+
+ # Edge case: Invalid proof, not in G1
+ blob = VALID_BLOBS[2]
+ proof = P1_NOT_IN_G1
+ commitment = G1
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_proof_not_in_G1', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, not on curve
+ blob = VALID_BLOBS[1]
+ proof = P1_NOT_ON_CURVE
+ commitment = G1
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_proof_not_on_curve', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too few bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)[:-1]
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_proof_too_few_bytes', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too many bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment) + b"\x00"
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_proof_too_many_bytes', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not in G1
+ blob = VALID_BLOBS[0]
+ proof = G1
+ commitment = P1_NOT_IN_G1
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_commitment_not_in_G1', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not on curve
+ blob = VALID_BLOBS[2]
+ proof = G1
+ commitment = P1_NOT_ON_CURVE
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_commitment_not_on_curve', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too few bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+ commitment = commitment[:-1]
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_commitment_too_few_bytes', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too many bytes
+ blob = VALID_BLOBS[1]
+ commitment = spec.blob_to_kzg_commitment(blob)
+ proof = spec.compute_blob_kzg_proof(blob, commitment)
+ commitment = commitment + b"\x00"
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ yield 'verify_blob_kzg_proof_case_commitment_too_many_bytes', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid blob
+ for blob in INVALID_BLOBS:
+ proof = G1
+ commitment = G1
+ expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'verify_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blob': encode_hex(blob),
+ 'commitment': encode_hex(commitment),
+ 'proof': encode_hex(proof),
+ },
+ 'output': None
+ }
+
+
+def case06_verify_blob_kzg_proof_batch():
+ # Valid cases
+ proofs = []
+ commitments = []
+ for blob in VALID_BLOBS:
+ commitments.append(spec.blob_to_kzg_commitment(blob))
+ proofs.append(spec.compute_blob_kzg_proof(blob, commitments[-1]))
+
+ for i in range(len(proofs) + 1):
+ assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i])
+ identifier = f'{encode_hex(hash(b"".join(VALID_BLOBS[:i])))}'
+ yield f'verify_blob_kzg_proof_batch_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS[:i]),
+ 'commitments': encode_hex_list(commitments[:i]),
+ 'proofs': encode_hex_list(proofs[:i]),
+ },
+ 'output': True
+ }
+
+ # Incorrect proof
+ proofs_incorrect = [bls_add_one(proofs[0])] + proofs[1:]
+ assert not spec.verify_blob_kzg_proof_batch(VALID_BLOBS, commitments, proofs_incorrect)
+ yield 'verify_blob_kzg_proof_batch_case_invalid_proof', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs_incorrect),
+ },
+ 'output': False
+ }
+
+ # Edge case: Invalid blobs
+ for blob in INVALID_BLOBS:
+ blobs_invalid = VALID_BLOBS[:4] + [blob] + VALID_BLOBS[5:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs)
+ identifier = f'{encode_hex(hash(blob))}'
+ yield f'verify_blob_kzg_proof_batch_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
+ 'input': {
+ 'blobs': encode_hex_list(blobs_invalid),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, not in G1
+ proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1)
+ yield 'verify_blob_kzg_proof_batch_case_proof_not_in_G1', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs_invalid_notG1),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, not on curve
+ proofs_invalid_notCurve = proofs[:1] + [P1_NOT_ON_CURVE] + proofs[2:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notCurve)
+ yield 'verify_blob_kzg_proof_batch_case_proof_not_on_curve', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs_invalid_notCurve),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too few bytes
+ proofs_invalid_tooFewBytes = proofs[:1] + [proofs[1][:-1]] + proofs[2:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooFewBytes)
+ yield 'verify_blob_kzg_proof_batch_case_proof_too_few_bytes', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs_invalid_tooFewBytes),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid proof, too many bytes
+ proofs_invalid_tooManyBytes = proofs[:1] + [proofs[1] + b"\x00"] + proofs[2:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooManyBytes)
+ yield 'verify_blob_kzg_proof_batch_case_proof_too_many_bytes', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs_invalid_tooManyBytes),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not in G1
+ commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_notG1, proofs)
+ yield 'verify_blob_kzg_proof_batch_case_commitment_not_in_G1', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments_invalid_notG1),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, not on curve
+ commitments_invalid_notCurve = commitments[:3] + [P1_NOT_ON_CURVE] + commitments[4:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_notCurve, proofs)
+ yield 'verify_blob_kzg_proof_batch_case_commitment_not_on_curve', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments_invalid_notCurve),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too few bytes
+ commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_tooFewBytes, proofs)
+ yield 'verify_blob_kzg_proof_batch_case_commitment_too_few_bytes', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments_invalid_tooFewBytes),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Invalid commitment, too many bytes
+ commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:]
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments_invalid_tooManyBytes, proofs)
+ yield 'verify_blob_kzg_proof_batch_case_commitment_too_many_bytes', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments_invalid_tooManyBytes),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Blob length different
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs)
+ yield 'verify_blob_kzg_proof_batch_case_blob_length_different', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS[:-1]),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Commitment length different
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments[:-1], proofs)
+ yield 'verify_blob_kzg_proof_batch_case_commitment_length_different', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments[:-1]),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
+
+ # Edge case: Proof length different
+ expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs[:-1])
+ yield 'verify_blob_kzg_proof_batch_case_proof_length_different', {
+ 'input': {
+ 'blobs': encode_hex_list(VALID_BLOBS),
+ 'commitments': encode_hex_list(commitments),
+ 'proofs': encode_hex_list(proofs[:-1]),
+ },
+ 'output': None
+ }
+
+
+def create_provider(fork_name: SpecForkName,
+ handler_name: str,
+ test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:
+
+ def prepare_fn() -> None:
+ # Nothing to load / change in spec. Maybe in future forks.
+ # Put the tests into the general config category, to not require any particular configuration.
+ return
+
+ def cases_fn() -> Iterable[gen_typing.TestCase]:
+ for data in test_case_fn():
+ (case_name, case_content) = data
+ yield gen_typing.TestCase(
+ fork_name=fork_name,
+ preset_name='general',
+ runner_name='kzg',
+ handler_name=handler_name,
+ suite_name='small',
+ case_name=case_name,
+ case_fn=lambda: [('data', 'data', case_content)]
+ )
+
+ return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
+
+
+if __name__ == "__main__":
+ bls.use_arkworks()
+ gen_runner.run_generator("kzg", [
+ # DENEB
+ create_provider(DENEB, 'blob_to_kzg_commitment', case01_blob_to_kzg_commitment),
+ create_provider(DENEB, 'compute_kzg_proof', case02_compute_kzg_proof),
+ create_provider(DENEB, 'verify_kzg_proof', case03_verify_kzg_proof),
+ create_provider(DENEB, 'compute_blob_kzg_proof', case04_compute_blob_kzg_proof),
+ create_provider(DENEB, 'verify_blob_kzg_proof', case05_verify_blob_kzg_proof),
+ create_provider(DENEB, 'verify_blob_kzg_proof_batch', case06_verify_blob_kzg_proof_batch),
+ ])
diff --git a/tests/generators/kzg_4844/requirements.txt b/tests/generators/kzg_4844/requirements.txt
new file mode 100644
index 0000000000..1822486863
--- /dev/null
+++ b/tests/generators/kzg_4844/requirements.txt
@@ -0,0 +1,2 @@
+pytest>=4.4
+../../../[generator]