From bf53f5e0b792e00a45013a16adb72a952e527cb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Bene=C5=A1?= Date: Thu, 31 Oct 2024 08:22:24 -0600 Subject: [PATCH 1/3] refactor: nuking `Token::privately_mint_private_note(...)` (#9616) --- .../contracts/token_contract/src/main.nr | 19 ------------------- yarn-project/bot/src/factory.ts | 2 +- .../src/benchmarks/bench_prover.test.ts | 2 +- .../src/benchmarks/bench_tx_size_fees.test.ts | 2 +- .../end-to-end/src/e2e_fees/fees_test.ts | 2 +- .../src/e2e_fees/private_payments.test.ts | 4 ++-- .../private_transfer_recursion.test.ts | 10 ++++++---- 7 files changed, 12 insertions(+), 29 deletions(-) diff --git a/noir-projects/noir-contracts/contracts/token_contract/src/main.nr b/noir-projects/noir-contracts/contracts/token_contract/src/main.nr index f08ef9fb987..65968ff0622 100644 --- a/noir-projects/noir-contracts/contracts/token_contract/src/main.nr +++ b/noir-projects/noir-contracts/contracts/token_contract/src/main.nr @@ -221,25 +221,6 @@ contract Token { // docs:end:insert_from_public } // docs:end:mint_private - // TODO: Nuke this - test functions do not belong to token contract! - #[private] - fn privately_mint_private_note(amount: Field) { - let caller = context.msg_sender(); - let caller_ovpk_m = get_public_keys(caller).ovpk_m; - storage.balances.at(caller).add(caller, U128::from_integer(amount)).emit( - encode_and_encrypt_note(&mut context, caller_ovpk_m, caller, caller), - ); - Token::at(context.this_address()) - .assert_minter_and_mint(context.msg_sender(), amount) - .enqueue(&mut context); - } - #[public] - #[internal] - fn assert_minter_and_mint(minter: AztecAddress, amount: Field) { - assert(storage.minters.at(minter).read(), "caller is not minter"); - let supply = storage.total_supply.read() + U128::from_integer(amount); - storage.total_supply.write(supply); - } // docs:start:shield #[public] fn shield(from: AztecAddress, amount: Field, secret_hash: Field, nonce: Field) { diff --git a/yarn-project/bot/src/factory.ts b/yarn-project/bot/src/factory.ts index 541acc975da..053dfb8f04b 100644 --- a/yarn-project/bot/src/factory.ts +++ b/yarn-project/bot/src/factory.ts @@ -149,7 +149,7 @@ export class BotFactory { calls.push( isStandardToken - ? token.methods.privately_mint_private_note(MINT_BALANCE).request() + ? 
token.methods.mint_to_private(sender, MINT_BALANCE).request() : token.methods.mint(MINT_BALANCE, sender, sender).request(), ); } diff --git a/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts index 2d8254319b0..203cf8599f0 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_prover.test.ts @@ -108,7 +108,7 @@ describe('benchmarks/proving', () => { await Promise.all([ initialGasContract.methods.claim(initialFpContract.address, 1e12, claimSecret, messageLeafIndex).send().wait(), initialTokenContract.methods.mint_public(initialSchnorrWallet.getAddress(), 1e12).send().wait(), - initialTokenContract.methods.privately_mint_private_note(1e12).send().wait(), + initialTokenContract.methods.mint_to_private(initialSchnorrWallet.getAddress(), 1e12).send().wait(), ]); recipient = CompleteAddress.random(); diff --git a/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts index e294bd3de89..fa5f17bd325 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts @@ -71,7 +71,7 @@ describe('benchmarks/tx_size_fees', () => { feeJuice.methods.claim(fpc.address, 100e9, fpcSecret, fpcLeafIndex).send().wait(), feeJuice.methods.claim(aliceWallet.getAddress(), 100e9, aliceSecret, aliceLeafIndex).send().wait(), ]); - await token.methods.privately_mint_private_note(100e9).send().wait(); + await token.methods.mint_to_private(aliceWallet.getAddress(), 100e9).send().wait(); await token.methods.mint_public(aliceWallet.getAddress(), 100e9).send().wait(); }); diff --git a/yarn-project/end-to-end/src/e2e_fees/fees_test.ts b/yarn-project/end-to-end/src/e2e_fees/fees_test.ts index 3ce49b5aa87..ade885f5234 100644 --- a/yarn-project/end-to-end/src/e2e_fees/fees_test.ts +++ b/yarn-project/end-to-end/src/e2e_fees/fees_test.ts @@ -116,7 +116,7 @@ export class FeesTest { /** Alice mints Token */ async mintToken(amount: bigint) { const balanceBefore = await this.token.methods.balance_of_private(this.aliceAddress).simulate(); - await this.token.methods.privately_mint_private_note(amount).send().wait(); + await this.token.methods.mint_to_private(this.aliceAddress, amount).send().wait(); const balanceAfter = await this.token.methods.balance_of_private(this.aliceAddress).simulate(); expect(balanceAfter).toEqual(balanceBefore + amount); } diff --git a/yarn-project/end-to-end/src/e2e_fees/private_payments.test.ts b/yarn-project/end-to-end/src/e2e_fees/private_payments.test.ts index 02b6d3c7180..23fe8817f8f 100644 --- a/yarn-project/end-to-end/src/e2e_fees/private_payments.test.ts +++ b/yarn-project/end-to-end/src/e2e_fees/private_payments.test.ts @@ -196,7 +196,7 @@ describe('e2e_fees private_payment', () => { */ const newlyMintedBananas = 10n; const tx = await bananaCoin.methods - .privately_mint_private_note(newlyMintedBananas) + .mint_to_private(aliceAddress, newlyMintedBananas) .send({ fee: { gasSettings, @@ -373,7 +373,7 @@ describe('e2e_fees private_payment', () => { await expect( bananaCoin.methods - .privately_mint_private_note(10) + .mint_to_private(aliceAddress, 10) .send({ // we need to skip public simulation otherwise the PXE refuses to accept the TX skipPublicSimulation: true, diff --git a/yarn-project/end-to-end/src/e2e_token_contract/private_transfer_recursion.test.ts 
b/yarn-project/end-to-end/src/e2e_token_contract/private_transfer_recursion.test.ts index 1425c0b16f0..dbc2ab59820 100644 --- a/yarn-project/end-to-end/src/e2e_token_contract/private_transfer_recursion.test.ts +++ b/yarn-project/end-to-end/src/e2e_token_contract/private_transfer_recursion.test.ts @@ -18,10 +18,12 @@ describe('e2e_token_contract private transfer recursion', () => { }); async function mintNotes(noteAmounts: bigint[]): Promise { - // Mint all notes, 4 at a time - for (let mintedNotes = 0; mintedNotes < noteAmounts.length; mintedNotes += 4) { - const toMint = noteAmounts.slice(mintedNotes, mintedNotes + 4); // We mint 4 notes at a time - const actions = toMint.map(amt => asset.methods.privately_mint_private_note(amt).request()); + // We mint only 3 notes in 1 transaction as that is the maximum public data writes we can squeeze into a tx. + // --> Minting one note requires 19 public data writes (16 for the note encrypted log, 3 for note hiding point). + const notesPerIteration = 3; + for (let mintedNotes = 0; mintedNotes < noteAmounts.length; mintedNotes += notesPerIteration) { + const toMint = noteAmounts.slice(mintedNotes, mintedNotes + notesPerIteration); + const actions = toMint.map(amt => asset.methods.mint_to_private(wallets[0].getAddress(), amt).request()); await new BatchCall(wallets[0], actions).send().wait(); } From 392114a0a66bd580175ff7a07b0c6d899d69be8f Mon Sep 17 00:00:00 2001 From: Alex Gherghisan Date: Thu, 31 Oct 2024 14:43:45 +0000 Subject: [PATCH 2/3] feat: spartan proving (#9584) This PR adds K8s config to deploy a network with proving enabled --------- Co-authored-by: PhilWindle <60546371+PhilWindle@users.noreply.github.com> Co-authored-by: ludamad --- .../arithmetization/mega_arithmetization.hpp | 6 +- .../templates/deploy-l1-verifier.yaml | 58 +++++++++++++++ .../aztec-network/templates/prover-agent.yaml | 73 +++++++++++++++++++ .../aztec-network/templates/prover-node.yaml | 4 +- spartan/aztec-network/templates/pxe.yaml | 4 +- spartan/aztec-network/values.yaml | 22 +++++- .../values/1-validator-with-proving.yaml | 37 ++++++++++ .../aztec-network/values/1-validators.yaml | 1 - yarn-project/bot/src/config.ts | 2 +- .../cli/src/cmds/l1/deploy_l1_verifier.ts | 30 ++++++-- yarn-project/cli/src/cmds/l1/index.ts | 20 ++++- yarn-project/cli/src/utils/commands.ts | 11 ++- 12 files changed, 242 insertions(+), 26 deletions(-) create mode 100644 spartan/aztec-network/templates/deploy-l1-verifier.yaml create mode 100644 spartan/aztec-network/templates/prover-agent.yaml create mode 100644 spartan/aztec-network/values/1-validator-with-proving.yaml diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp index 379602e2463..67a77dfa715 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/mega_arithmetization.hpp @@ -96,8 +96,8 @@ template class MegaArith { this->delta_range = 25000; this->elliptic = 80000; this->aux = 100000; - this->poseidon2_external = 30000; - this->poseidon2_internal = 150000; + this->poseidon2_external = 30128; + this->poseidon2_internal = 172000; this->lookup = 200000; } }; @@ -248,4 +248,4 @@ using MegaArithmetization = MegaArith; template concept HasAdditionalSelectors = IsAnyOf>; -} // namespace bb \ No newline at end of file +} // namespace bb diff --git 
a/spartan/aztec-network/templates/deploy-l1-verifier.yaml b/spartan/aztec-network/templates/deploy-l1-verifier.yaml new file mode 100644 index 00000000000..90530932fdc --- /dev/null +++ b/spartan/aztec-network/templates/deploy-l1-verifier.yaml @@ -0,0 +1,58 @@ +{{- if .Values.network.setupL2Contracts }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "aztec-network.fullname" . }}-deploy-l1-verifier + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +spec: + template: + metadata: + labels: + {{- include "aztec-network.selectorLabels" . | nindent 8 }} + app: deploy-l1-verifier + spec: + restartPolicy: OnFailure + containers: + - name: deploy-l1-verifier + image: {{ .Values.images.aztec.image }} + command: + - /bin/bash + - -c + - | + set -e + + [ $ENABLE = "true" ] || exit 0 + + until curl -s -X GET "$AZTEC_NODE_URL/status"; do + echo "Waiting for Aztec node $AZTEC_NODE_URL..." + sleep 5 + done + echo "Boot node is ready!" + + export ROLLUP_CONTRACT_ADDRESS=$(curl -X POST -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","method":"node_getL1ContractAddresses","params":[],"id":1}' \ + "$AZTEC_NODE_URL" \ + | jq -r '.result.rollupAddress.data') + + echo "Rollup contract address: $ROLLUP_CONTRACT_ADDRESS" + node /usr/src/yarn-project/aztec/dest/bin/index.js deploy-l1-verifier --verifier real + echo "L1 verifier deployed" + env: + - name: ENABLE + value: {{ .Values.jobs.deployL1Verifier.enable | quote }} + - name: NODE_NO_WARNINGS + value: "1" + - name: DEBUG + value: "aztec:*" + - name: LOG_LEVEL + value: "debug" + - name: ETHEREUM_HOST + value: {{ include "aztec-network.ethereumHost" . | quote }} + - name: L1_CHAIN_ID + value: {{ .Values.ethereum.chainId | quote }} + - name: PRIVATE_KEY + value: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + - name: AZTEC_NODE_URL + value: {{ include "aztec-network.bootNodeUrl" . | quote }} +{{ end }} diff --git a/spartan/aztec-network/templates/prover-agent.yaml b/spartan/aztec-network/templates/prover-agent.yaml new file mode 100644 index 00000000000..2ded11498ee --- /dev/null +++ b/spartan/aztec-network/templates/prover-agent.yaml @@ -0,0 +1,73 @@ +{{- if .Values.proverAgent.enabled }} +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: {{ include "aztec-network.fullname" . }}-prover-agent + labels: + {{- include "aztec-network.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.proverAgent.replicas }} + selector: + matchLabels: + {{- include "aztec-network.selectorLabels" . | nindent 6 }} + app: prover-agent + template: + metadata: + labels: + {{- include "aztec-network.selectorLabels" . | nindent 8 }} + app: prover-agent + spec: + initContainers: + - name: wait-for-prover-node + image: {{ .Values.images.curl.image }} + command: + - /bin/sh + - -c + - | + until curl -s -X POST "$PROVER_JOB_SOURCE_URL/status"; do + echo "Waiting for Prover node $PROVER_JOB_SOURCE_URL ..." + sleep 5 + done + echo "Prover node is ready!" + {{- if .Values.telemetry.enabled }} + until curl --head --silent {{ include "aztec-network.otelCollectorMetricsEndpoint" . }} > /dev/null; do + echo "Waiting for OpenTelemetry collector..." + sleep 5 + done + echo "OpenTelemetry collector is ready!" + {{- end }} + env: + - name: PROVER_JOB_SOURCE_URL + value: http://{{ include "aztec-network.fullname" . 
}}-prover-node.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proverNode.service.nodePort }} + containers: + - name: prover-agent + image: "{{ .Values.images.aztec.image }}" + imagePullPolicy: {{ .Values.images.aztec.pullPolicy }} + command: + - "/bin/bash" + - "-c" + - "node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover" + env: + - name: LOG_LEVEL + value: "{{ .Values.proverAgent.logLevel }}" + - name: LOG_JSON + value: "1" + - name: DEBUG + value: "{{ .Values.proverAgent.debug }}" + - name: PROVER_REAL_PROOFS + value: "{{ .Values.proverAgent.realProofs }}" + - name: PROVER_JOB_SOURCE_URL + value: http://{{ include "aztec-network.fullname" . }}-prover-node.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proverNode.service.nodePort }} + - name: PROVER_AGENT_ENABLED + value: "true" + - name: PROVER_AGENT_CONCURRENCY + value: {{ .Values.proverAgent.concurrency | quote }} + - name: HARDWARE_CONCURRENCY + value: {{ .Values.proverAgent.bb.hardwareConcurrency | quote }} + - name: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT + value: {{ include "aztec-network.otelCollectorMetricsEndpoint" . | quote }} + - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: {{ include "aztec-network.otelCollectorTracesEndpoint" . | quote }} + - name: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT + value: {{ include "aztec-network.otelCollectorLogsEndpoint" . | quote }} +{{- end }} diff --git a/spartan/aztec-network/templates/prover-node.yaml b/spartan/aztec-network/templates/prover-node.yaml index 6ec388150d3..3158f3145a5 100644 --- a/spartan/aztec-network/templates/prover-node.yaml +++ b/spartan/aztec-network/templates/prover-node.yaml @@ -67,7 +67,7 @@ spec: command: - "/bin/bash" - "-c" - - "source /shared/contracts.env && env && node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover-node --prover --archiver" + - "source /shared/contracts.env && env && node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js start --prover-node --archiver" volumeMounts: - name: shared-volume mountPath: /shared @@ -163,4 +163,4 @@ spec: - port: {{ .Values.proverNode.service.nodePort }} name: node {{ end }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/spartan/aztec-network/templates/pxe.yaml b/spartan/aztec-network/templates/pxe.yaml index 0ae3702943e..d885313a6f8 100644 --- a/spartan/aztec-network/templates/pxe.yaml +++ b/spartan/aztec-network/templates/pxe.yaml @@ -57,6 +57,8 @@ spec: value: "{{ .Values.pxe.logLevel }}" - name: DEBUG value: "{{ .Values.pxe.debug }}" + - name: PXE_PROVER_ENABLED + value: "{{ .Values.pxe.proverEnabled }}" ports: - name: http containerPort: {{ .Values.pxe.service.port }} @@ -119,4 +121,4 @@ spec: nodePort: {{ .Values.pxe.service.nodePort }} {{- end }} {{ end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/spartan/aztec-network/values.yaml b/spartan/aztec-network/values.yaml index 8905fc155eb..30056d43827 100644 --- a/spartan/aztec-network/values.yaml +++ b/spartan/aztec-network/values.yaml @@ -30,7 +30,7 @@ bootNode: p2pUdpPort: 40400 nodePort: 8080 logLevel: "debug" - debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*" + debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" coinbaseAddress: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" sequencer: maxSecondsBetweenBlocks: 0 @@ -67,7 +67,7 @@ validator: p2pUdpPort: 40400 nodePort: 8080 logLevel: "debug" - debug: 
"aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*" + debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" sequencer: maxSecondsBetweenBlocks: 0 minTxsPerBlock: 1 @@ -88,9 +88,9 @@ proverNode: service: nodePort: 8080 logLevel: "debug" - debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*" + debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" realProofs: false - proverAgentEnabled: true + proverAgentEnabled: false resources: requests: memory: "2Gi" @@ -102,6 +102,7 @@ pxe: externalHost: "" logLevel: "debug" debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*" + proverEnable: false replicas: 1 service: port: 8080 @@ -176,3 +177,16 @@ ethereum: memory: "2Gi" cpu: "200m" storage: "8Gi" + +proverAgent: + enabled: true + replicas: 1 + debug: "aztec:*,-aztec:avm_simulator*,-aztec:libp2p_service*,-aztec:circuits:artifact_hash,-json-rpc*,-aztec:world-state:database,-aztec:l2_block_stream*" + realProofs: false + concurrency: 1 + bb: + hardwareConcurrency: "" + +jobs: + deployL1Verifier: + enable: false diff --git a/spartan/aztec-network/values/1-validator-with-proving.yaml b/spartan/aztec-network/values/1-validator-with-proving.yaml new file mode 100644 index 00000000000..ac7ca644a1f --- /dev/null +++ b/spartan/aztec-network/values/1-validator-with-proving.yaml @@ -0,0 +1,37 @@ +validator: + replicas: 1 + validatorKeys: + - 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + validatorAddresses: + - 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 + validator: + disabled: false + +bootNode: + validator: + disabled: true + +proverNode: + realProofs: true + +proverAgent: + replicas: 6 + realProofs: true + bb: + hardwareConcurrency: 16 + +pxe: + proverEnabled: true + +bot: + enabled: true + pxeProverEnabled: true + txIntervalSeconds: 200 + +jobs: + deployL1Verifier: + enable: true + +telemetry: + enabled: true + otelCollectorEndpoint: http://metrics-opentelemetry-collector.metrics:4318 diff --git a/spartan/aztec-network/values/1-validators.yaml b/spartan/aztec-network/values/1-validators.yaml index 28bb6a0b621..53038895c1f 100644 --- a/spartan/aztec-network/values/1-validators.yaml +++ b/spartan/aztec-network/values/1-validators.yaml @@ -1,5 +1,4 @@ validator: - debug: "aztec:*,-aztec:avm_simulator:*,-aztec:libp2p_service" replicas: 1 validatorKeys: - 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 diff --git a/yarn-project/bot/src/config.ts b/yarn-project/bot/src/config.ts index a4f36f0ab8a..2cd14d0d78c 100644 --- a/yarn-project/bot/src/config.ts +++ b/yarn-project/bot/src/config.ts @@ -122,7 +122,7 @@ export const botConfigMappings: ConfigMappingsType = { description: 'Which chain the bot follows', defaultValue: 'NONE', parseEnv(val) { - if (!botFollowChain.includes(val as any)) { + if (!(botFollowChain as readonly string[]).includes(val.toUpperCase())) { throw new Error(`Invalid value for BOT_FOLLOW_CHAIN: ${val}`); } return val as BotFollowChain; diff --git a/yarn-project/cli/src/cmds/l1/deploy_l1_verifier.ts b/yarn-project/cli/src/cmds/l1/deploy_l1_verifier.ts index ab0ddb2dfcf..4150479fbd6 100644 --- a/yarn-project/cli/src/cmds/l1/deploy_l1_verifier.ts +++ b/yarn-project/cli/src/cmds/l1/deploy_l1_verifier.ts @@ -5,9 +5,10 @@ 
import { type DebugLogger, type LogFn } from '@aztec/foundation/log'; import { InvalidOptionArgumentError } from 'commander'; // @ts-expect-error solc-js doesn't publish its types https://github.com/ethereum/solc-js/issues/689 import solc from 'solc'; -import { getContract } from 'viem'; +import { type Hex, getContract } from 'viem'; export async function deployUltraHonkVerifier( + rollupAddress: Hex | undefined, ethRpcUrl: string, l1ChainId: string, privateKey: string | undefined, @@ -32,14 +33,21 @@ export async function deployUltraHonkVerifier( createEthereumChain(ethRpcUrl, l1ChainId).chainInfo, ); - const pxe = await createCompatibleClient(pxeRpcUrl, debugLogger); - const { l1ContractAddresses } = await pxe.getNodeInfo(); + if (!rollupAddress && pxeRpcUrl) { + const pxe = await createCompatibleClient(pxeRpcUrl, debugLogger); + const { l1ContractAddresses } = await pxe.getNodeInfo(); + rollupAddress = l1ContractAddresses.rollupAddress.toString(); + } + + if (!rollupAddress) { + throw new InvalidOptionArgumentError('Missing rollup address'); + } const { RollupAbi } = await import('@aztec/l1-artifacts'); const rollup = getContract({ abi: RollupAbi, - address: l1ContractAddresses.rollupAddress.toString(), + address: rollupAddress, client: walletClient, }); @@ -64,6 +72,7 @@ export async function deployUltraHonkVerifier( } export async function deployMockVerifier( + rollupAddress: Hex | undefined, ethRpcUrl: string, l1ChainId: string, privateKey: string | undefined, @@ -87,12 +96,19 @@ export async function deployMockVerifier( ); log(`Deployed MockVerifier at ${mockVerifierAddress.toString()}`); - const pxe = await createCompatibleClient(pxeRpcUrl, debugLogger); - const { l1ContractAddresses } = await pxe.getNodeInfo(); + if (!rollupAddress && pxeRpcUrl) { + const pxe = await createCompatibleClient(pxeRpcUrl, debugLogger); + const { l1ContractAddresses } = await pxe.getNodeInfo(); + rollupAddress = l1ContractAddresses.rollupAddress.toString(); + } + + if (!rollupAddress) { + throw new InvalidOptionArgumentError('Missing rollup address'); + } const rollup = getContract({ abi: RollupAbi, - address: l1ContractAddresses.rollupAddress.toString(), + address: rollupAddress, client: walletClient, }); diff --git a/yarn-project/cli/src/cmds/l1/index.ts b/yarn-project/cli/src/cmds/l1/index.ts index 37cd7a7e7dd..80c0d514c3b 100644 --- a/yarn-project/cli/src/cmds/l1/index.ts +++ b/yarn-project/cli/src/cmds/l1/index.ts @@ -1,12 +1,13 @@ import { EthAddress } from '@aztec/foundation/eth-address'; import { type DebugLogger, type LogFn } from '@aztec/foundation/log'; -import { type Command } from 'commander'; +import { type Command, Option } from 'commander'; import { ETHEREUM_HOST, PRIVATE_KEY, l1ChainIdOption, + makePxeOption, parseAztecAddress, parseBigint, parseEthereumAddress, @@ -207,8 +208,19 @@ export function injectCommands(program: Command, log: LogFn, debugLogger: DebugL 'Url of the ethereum host. 
Chain identifiers localhost and testnet can be used', ETHEREUM_HOST, ) - .requiredOption('--l1-chain-id ', 'The chain id of the L1 network', '31337') - .addOption(pxeOption) + .addOption( + new Option('--l1-chain-id ', 'The chain id of the L1 network') + .env('L1_CHAIN_ID') + .default('31337') + .makeOptionMandatory(true), + ) + .addOption(makePxeOption(false).conflicts('rollup-address')) + .addOption( + new Option('--rollup-address ', 'The address of the rollup contract') + .env('ROLLUP_CONTRACT_ADDRESS') + .argParser(parseEthereumAddress) + .conflicts('rpc-url'), + ) .option('--l1-private-key ', 'The L1 private key to use for deployment', PRIVATE_KEY) .option( '-m, --mnemonic ', @@ -222,6 +234,7 @@ export function injectCommands(program: Command, log: LogFn, debugLogger: DebugL const { deployMockVerifier, deployUltraHonkVerifier } = await import('./deploy_l1_verifier.js'); if (options.verifier === 'mock') { await deployMockVerifier( + options.rollupAddress?.toString(), options.l1RpcUrl, options.l1ChainId, options.l1PrivateKey, @@ -232,6 +245,7 @@ export function injectCommands(program: Command, log: LogFn, debugLogger: DebugL ); } else { await deployUltraHonkVerifier( + options.rollupAddress?.toString(), options.l1RpcUrl, options.l1ChainId, options.l1PrivateKey, diff --git a/yarn-project/cli/src/utils/commands.ts b/yarn-project/cli/src/utils/commands.ts index 656f07282f8..5bc66a8a5c0 100644 --- a/yarn-project/cli/src/utils/commands.ts +++ b/yarn-project/cli/src/utils/commands.ts @@ -29,10 +29,13 @@ export function addOptions(program: Command, options: Option[]) { return program; } -export const pxeOption = new Option('-u, --rpc-url ', 'URL of the PXE') - .env('PXE_URL') - .default(`http://${LOCALHOST}:8080`) - .makeOptionMandatory(true); +export const makePxeOption = (mandatory: boolean) => + new Option('-u, --rpc-url ', 'URL of the PXE') + .env('PXE_URL') + .default(`http://${LOCALHOST}:8080`) + .makeOptionMandatory(mandatory); + +export const pxeOption = makePxeOption(true); export const l1ChainIdOption = new Option('-c, --l1-chain-id ', 'Chain ID of the ethereum host') .env('L1_CHAIN_ID') From feace70727f9e5a971809955030a8ea88ce84f4a Mon Sep 17 00:00:00 2001 From: Innokentii Sennovskii Date: Thu, 31 Oct 2024 14:44:03 +0000 Subject: [PATCH 3/3] fix: Resolution of bugs from bigfield audits (#9547) This PR resolves Critical, High, Medium, Low and some informational issues from the bigfield test audits by ZKSecurity, Zellic and Spearbit --------- Co-authored-by: Sarkoxed --- .../ultra_circuit_builder.test.cpp | 16 +- .../cpp/src/barretenberg/dsl/CMakeLists.txt | 2 +- .../dsl/acir_format/recursion_constraint.cpp | 11 +- .../plonk/composer/ultra_composer.test.cpp | 8 +- .../stdlib/encryption/ecdsa/ecdsa_impl.hpp | 5 +- .../ultra_recursive_verifier.cpp | 4 +- .../aggregation_state/aggregation_state.hpp | 10 +- .../plonk_recursion/verifier/verifier.hpp | 2 +- .../stdlib/primitives/bigfield/bigfield.hpp | 185 ++++- .../primitives/bigfield/bigfield.test.cpp | 269 ++++++- .../primitives/bigfield/bigfield_impl.hpp | 738 +++++++++++------- .../primitives/bigfield/goblin_field.hpp | 10 +- .../primitives/biggroup/biggroup_impl.hpp | 4 +- .../primitives/biggroup/biggroup_tables.hpp | 11 +- .../stdlib/primitives/bool/bool.hpp | 2 + .../stdlib/primitives/databus/databus.hpp | 2 +- .../stdlib/primitives/field/field.cpp | 25 +- .../stdlib/primitives/field/field.hpp | 20 +- .../stdlib/primitives/field/field.test.cpp | 16 + .../primitives/plookup/plookup.test.cpp | 12 +- 
.../translator_recursive_verifier.cpp | 3 +- .../ultra_circuit_builder.cpp | 37 +- .../ultra_circuit_builder.hpp | 47 +- .../ultra_honk/ultra_honk.test.cpp | 8 +- 24 files changed, 991 insertions(+), 456 deletions(-) diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp index e7a3b8eaf52..ae2b514f434 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_builder.test.cpp @@ -611,22 +611,20 @@ TEST(UltraCircuitConstructor, NonNativeFieldMultiplication) const auto split_into_limbs = [&](const uint512_t& input) { constexpr size_t NUM_BITS = 68; - std::array limbs; + std::array limbs; limbs[0] = input.slice(0, NUM_BITS).lo; limbs[1] = input.slice(NUM_BITS * 1, NUM_BITS * 2).lo; limbs[2] = input.slice(NUM_BITS * 2, NUM_BITS * 3).lo; limbs[3] = input.slice(NUM_BITS * 3, NUM_BITS * 4).lo; - limbs[4] = fr(input.lo); return limbs; }; - const auto get_limb_witness_indices = [&](const std::array& limbs) { - std::array limb_indices; + const auto get_limb_witness_indices = [&](const std::array& limbs) { + std::array limb_indices; limb_indices[0] = circuit_constructor.add_variable(limbs[0]); limb_indices[1] = circuit_constructor.add_variable(limbs[1]); limb_indices[2] = circuit_constructor.add_variable(limbs[2]); limb_indices[3] = circuit_constructor.add_variable(limbs[3]); - limb_indices[4] = circuit_constructor.add_variable(limbs[4]); return limb_indices; }; const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4); @@ -671,22 +669,20 @@ TEST(UltraCircuitConstructor, NonNativeFieldMultiplicationSortCheck) const auto split_into_limbs = [&](const uint512_t& input) { constexpr size_t NUM_BITS = 68; - std::array limbs; + std::array limbs; limbs[0] = input.slice(0, NUM_BITS).lo; limbs[1] = input.slice(NUM_BITS * 1, NUM_BITS * 2).lo; limbs[2] = input.slice(NUM_BITS * 2, NUM_BITS * 3).lo; limbs[3] = input.slice(NUM_BITS * 3, NUM_BITS * 4).lo; - limbs[4] = fr(input.lo); return limbs; }; - const auto get_limb_witness_indices = [&](const std::array& limbs) { - std::array limb_indices; + const auto get_limb_witness_indices = [&](const std::array& limbs) { + std::array limb_indices; limb_indices[0] = circuit_constructor.add_variable(limbs[0]); limb_indices[1] = circuit_constructor.add_variable(limbs[1]); limb_indices[2] = circuit_constructor.add_variable(limbs[2]); limb_indices[3] = circuit_constructor.add_variable(limbs[3]); - limb_indices[4] = circuit_constructor.add_variable(limbs[4]); return limb_indices; }; const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4); diff --git a/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt b/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt index 2ddd66ceb49..1f44c3c2746 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt +++ b/barretenberg/cpp/src/barretenberg/dsl/CMakeLists.txt @@ -13,7 +13,7 @@ set(DSL_DEPENDENCIES stdlib_schnorr stdlib_honk_verifier) -if (NOT WASM) +if (NOT WASM AND NOT DISABLE_AZTEC_VM) list(APPEND DSL_DEPENDENCIES libdeflate::libdeflate_static vm) endif() diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.cpp index f0f1fe1bff2..0015e9390ae 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.cpp +++ 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/recursion_constraint.cpp @@ -114,11 +114,12 @@ AggregationObjectIndices create_recursion_constraints(Builder& builder, if (!inner_aggregation_indices_all_zero) { std::array aggregation_elements; for (size_t i = 0; i < 4; ++i) { - aggregation_elements[i] = - bn254::BaseField(field_ct::from_witness_index(&builder, aggregation_input[4 * i]), - field_ct::from_witness_index(&builder, aggregation_input[4 * i + 1]), - field_ct::from_witness_index(&builder, aggregation_input[4 * i + 2]), - field_ct::from_witness_index(&builder, aggregation_input[4 * i + 3])); + aggregation_elements[i] = bn254::BaseField::construct_from_limbs( + field_ct::from_witness_index(&builder, aggregation_input[4 * i]), + field_ct::from_witness_index(&builder, aggregation_input[4 * i + 1]), + field_ct::from_witness_index(&builder, aggregation_input[4 * i + 2]), + field_ct::from_witness_index(&builder, aggregation_input[4 * i + 3])); + aggregation_elements[i].assert_is_in_field(); } // If we have a previous aggregation object, assign it to `previous_aggregation` so that it is included diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp index e46237a4306..bf20e3800ca 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.test.cpp @@ -593,22 +593,20 @@ TYPED_TEST(ultra_plonk_composer, non_native_field_multiplication) const auto split_into_limbs = [&](const uint512_t& input) { constexpr size_t NUM_BITS = 68; - std::array limbs; + std::array limbs; limbs[0] = input.slice(0, NUM_BITS).lo; limbs[1] = input.slice(NUM_BITS * 1, NUM_BITS * 2).lo; limbs[2] = input.slice(NUM_BITS * 2, NUM_BITS * 3).lo; limbs[3] = input.slice(NUM_BITS * 3, NUM_BITS * 4).lo; - limbs[4] = fr(input.lo); return limbs; }; - const auto get_limb_witness_indices = [&](const std::array& limbs) { - std::array limb_indices; + const auto get_limb_witness_indices = [&](const std::array& limbs) { + std::array limb_indices; limb_indices[0] = builder.add_variable(limbs[0]); limb_indices[1] = builder.add_variable(limbs[1]); limb_indices[2] = builder.add_variable(limbs[2]); limb_indices[3] = builder.add_variable(limbs[3]); - limb_indices[4] = builder.add_variable(limbs[4]); return limb_indices; }; const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp index 02c69d8f770..aefc1a2316b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/encryption/ecdsa/ecdsa_impl.hpp @@ -82,8 +82,9 @@ bool_t ecdsa_verify_signature(const stdlib::byte_array& messag // Read more about this at: https://www.derpturkey.com/inherent-malleability-of-ecdsa-signatures/amp/ s.assert_less_than((Fr::modulus + 1) / 2); - Fr u1 = z / s; - Fr u2 = r / s; + // We already checked that s is nonzero + Fr u1 = z.div_without_denominator_check(s); + Fr u2 = r.div_without_denominator_check(s); public_key.validate_on_curve(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp index bf1ac000ac9..aa2c057b8b9 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/ultra_recursive_verifier.cpp @@ -70,8 +70,8 @@ UltraRecursiveVerifier_::AggregationObject UltraRecursiveVerifier_public_inputs[key->recursive_proof_public_input_indices[idx]]; idx++; } - base_field_vals[j] = - typename Curve::BaseField(bigfield_limbs[0], bigfield_limbs[1], bigfield_limbs[2], bigfield_limbs[3]); + base_field_vals[j] = Curve::BaseField::construct_from_limbs( + bigfield_limbs[0], bigfield_limbs[1], bigfield_limbs[2], bigfield_limbs[3]); } nested_pairing_points[i] = typename Curve::Group(base_field_vals[0], base_field_vals[1]); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/aggregation_state/aggregation_state.hpp b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/aggregation_state/aggregation_state.hpp index 52bbdd10dab..ced1a04cdb7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/aggregation_state/aggregation_state.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/aggregation_state/aggregation_state.hpp @@ -109,11 +109,11 @@ aggregation_state convert_witness_indices_to_agg_obj(Builder& builder, { std::array aggregation_elements; for (size_t i = 0; i < 4; ++i) { - aggregation_elements[i] = - typename Curve::BaseField(Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i]), - Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 1]), - Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 2]), - Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 3])); + aggregation_elements[i] = Curve::BaseField::construct_from_limbs( + Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i]), + Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 1]), + Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 2]), + Curve::ScalarField::from_witness_index(&builder, witness_indices[4 * i + 3])); aggregation_elements[i].assert_is_in_field(); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verifier/verifier.hpp b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verifier/verifier.hpp index df0bffaf55e..b1fa8239519 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verifier/verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/plonk_recursion/verifier/verifier.hpp @@ -345,7 +345,7 @@ aggregation_state verify_proof_(typename Curve::Builder* context, l1.create_range_constraint(fq_ct::NUM_LIMB_BITS, "l1"); l2.create_range_constraint(fq_ct::NUM_LIMB_BITS, "l2"); l3.create_range_constraint(fq_ct::NUM_LAST_LIMB_BITS, "l3"); - return fq_ct(l0, l1, l2, l3, false); + return fq_ct::unsafe_construct_from_limbs(l0, l1, l2, l3, false); }; fr_ct recursion_separator_challenge = transcript.get_challenge_field_element("separator", 2); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp index 0eb5d90a7fe..dd3011149d8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp @@ -25,15 +25,14 @@ template class bigfield { struct Limb { Limb() {} - Limb(const field_t& input, const uint256_t max = uint256_t(0)) + Limb(const field_t& input, const uint256_t& max = DEFAULT_MAXIMUM_LIMB) : element(input) { - if 
(input.witness_index == IS_CONSTANT) { - maximum_value = uint256_t(input.additive_constant) + 1; - } else if (max != uint256_t(0)) { - maximum_value = max; + if (input.is_constant()) { + maximum_value = uint256_t(input.additive_constant); + ASSERT(maximum_value <= max); } else { - maximum_value = DEFAULT_MAXIMUM_LIMB; + maximum_value = max; } } friend std::ostream& operator<<(std::ostream& os, const Limb& a) @@ -95,40 +94,104 @@ template class bigfield { : bigfield(nullptr, uint256_t(value)) {} - // we assume the limbs have already been normalized! - bigfield(const field_t& a, - const field_t& b, - const field_t& c, - const field_t& d, - const bool can_overflow = false) + /** + * @brief Construct a bigfield element from binary limbs that are already reduced + * + * @details This API should only be used by bigfield and other stdlib members for efficiency and with extreme care. + * We need it in cases where we precompute and reduce the elements, for example, and then put them in a table + * + */ + static bigfield unsafe_construct_from_limbs(const field_t& a, + const field_t& b, + const field_t& c, + const field_t& d, + const bool can_overflow = false) { - context = a.context; - binary_basis_limbs[0] = Limb(field_t(a)); - binary_basis_limbs[1] = Limb(field_t(b)); - binary_basis_limbs[2] = Limb(field_t(c)); - binary_basis_limbs[3] = + ASSERT(a.is_constant() == b.is_constant() && b.is_constant() == c.is_constant() && + c.is_constant() == d.is_constant()); + bigfield result; + result.context = a.context; + result.binary_basis_limbs[0] = Limb(field_t(a)); + result.binary_basis_limbs[1] = Limb(field_t(b)); + result.binary_basis_limbs[2] = Limb(field_t(c)); + result.binary_basis_limbs[3] = Limb(field_t(d), can_overflow ? DEFAULT_MAXIMUM_LIMB : DEFAULT_MAXIMUM_MOST_SIGNIFICANT_LIMB); - prime_basis_limb = - (binary_basis_limbs[3].element * shift_3) - .add_two(binary_basis_limbs[2].element * shift_2, binary_basis_limbs[1].element * shift_1); - prime_basis_limb += (binary_basis_limbs[0].element); + result.prime_basis_limb = (result.binary_basis_limbs[3].element * shift_3) + .add_two(result.binary_basis_limbs[2].element * shift_2, + result.binary_basis_limbs[1].element * shift_1); + result.prime_basis_limb += (result.binary_basis_limbs[0].element); + return result; }; - // we assume the limbs have already been normalized! - bigfield(const field_t& a, - const field_t& b, - const field_t& c, - const field_t& d, - const field_t& prime_limb, - const bool can_overflow = false) + /** + * @brief Construct a bigfield element from binary limbs that are already reduced and ensure they are range + * constrained + * + */ + static bigfield construct_from_limbs(const field_t& a, + const field_t& b, + const field_t& c, + const field_t& d, + const bool can_overflow = false) { - context = a.context; - binary_basis_limbs[0] = Limb(field_t(a)); - binary_basis_limbs[1] = Limb(field_t(b)); - binary_basis_limbs[2] = Limb(field_t(c)); - binary_basis_limbs[3] = + ASSERT(a.is_constant() == b.is_constant() && b.is_constant() == c.is_constant() && + c.is_constant() == d.is_constant()); + bigfield result; + auto ctx = a.context; + result.context = a.context; + result.binary_basis_limbs[0] = Limb(field_t(a)); + result.binary_basis_limbs[1] = Limb(field_t(b)); + result.binary_basis_limbs[2] = Limb(field_t(c)); + result.binary_basis_limbs[3] = Limb(field_t(d), can_overflow ? 
DEFAULT_MAXIMUM_LIMB : DEFAULT_MAXIMUM_MOST_SIGNIFICANT_LIMB); - prime_basis_limb = prime_limb; + result.prime_basis_limb = (result.binary_basis_limbs[3].element * shift_3) + .add_two(result.binary_basis_limbs[2].element * shift_2, + result.binary_basis_limbs[1].element * shift_1); + result.prime_basis_limb += (result.binary_basis_limbs[0].element); + const size_t num_last_limb_bits = (can_overflow) ? NUM_LIMB_BITS : NUM_LAST_LIMB_BITS; + if constexpr (HasPlookup) { + ctx->range_constrain_two_limbs(result.binary_basis_limbs[0].element.get_normalized_witness_index(), + result.binary_basis_limbs[1].element.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + static_cast(NUM_LIMB_BITS)); + ctx->range_constrain_two_limbs(result.binary_basis_limbs[2].element.get_normalized_witness_index(), + result.binary_basis_limbs[3].element.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + static_cast(num_last_limb_bits)); + + } else { + a.create_range_constraint(NUM_LIMB_BITS); + b.create_range_constraint(NUM_LIMB_BITS); + c.create_range_constraint(NUM_LIMB_BITS); + d.create_range_constraint(num_last_limb_bits); + } + return result; + }; + /** + * @brief Construct a bigfield element from binary limbs and a prime basis limb that are already reduced + * + * @details This API should only be used by bigfield and other stdlib members for efficiency and with extreme care. + * We need it in cases where we precompute and reduce the elements, for example, and then put them in a table + * + */ + static bigfield unsafe_construct_from_limbs(const field_t& a, + const field_t& b, + const field_t& c, + const field_t& d, + const field_t& prime_limb, + const bool can_overflow = false) + { + ASSERT(a.is_constant() == b.is_constant() && b.is_constant() == c.is_constant() && + c.is_constant() == d.is_constant() && d.is_constant() == prime_limb.is_constant()); + bigfield result; + result.context = a.context; + result.binary_basis_limbs[0] = Limb(field_t(a)); + result.binary_basis_limbs[1] = Limb(field_t(b)); + result.binary_basis_limbs[2] = Limb(field_t(c)); + result.binary_basis_limbs[3] = + Limb(field_t(d), can_overflow ? 
DEFAULT_MAXIMUM_LIMB : DEFAULT_MAXIMUM_MOST_SIGNIFICANT_LIMB); + result.prime_basis_limb = prime_limb; + return result; }; bigfield(const byte_array& bytes); @@ -155,6 +218,9 @@ template class bigfield { static constexpr uint512_t modulus_u512 = uint512_t(modulus); static constexpr uint64_t NUM_LIMB_BITS = NUM_LIMB_BITS_IN_FIELD_SIMULATION; static constexpr uint64_t NUM_LAST_LIMB_BITS = modulus_u512.get_msb() + 1 - (NUM_LIMB_BITS * 3); + // The quotient reduction checks currently only support >=250 bit moduli and moduli >256 have never been tested + // (Check zkSecurity audit report issue #12 for explanation) + static_assert(modulus_u512.get_msb() + 1 >= 250 && modulus_u512.get_msb() + 1 <= 256); static constexpr uint1024_t DEFAULT_MAXIMUM_REMAINDER = (uint1024_t(1) << (NUM_LIMB_BITS * 3 + NUM_LAST_LIMB_BITS)) - uint1024_t(1); static constexpr uint256_t DEFAULT_MAXIMUM_LIMB = (uint256_t(1) << NUM_LIMB_BITS) - uint256_t(1); @@ -163,6 +229,10 @@ template class bigfield { static constexpr uint64_t LOG2_BINARY_MODULUS = NUM_LIMB_BITS * NUM_LIMBS; static constexpr bool is_composite = true; // false only when fr is native + // This limits the size of all vectors that are being used to 16 (we don't really need more) + static constexpr size_t MAXIMUM_SUMMAND_COUNT_LOG = 4; + static constexpr size_t MAXIMUM_SUMMAND_COUNT = 1 << MAXIMUM_SUMMAND_COUNT_LOG; + static constexpr uint256_t prime_basis_maximum_limb = uint256_t(modulus_u512.slice(NUM_LIMB_BITS * (NUM_LIMBS - 1), NUM_LIMB_BITS* NUM_LIMBS)); static constexpr Basis prime_basis{ uint512_t(bb::fr::modulus), bb::fr::modulus.get_msb() + 1 }; @@ -191,6 +261,8 @@ template class bigfield { byte_array to_byte_array() const { byte_array result(get_context()); + // Prevents aliases + assert_is_in_field(); field_t lo = binary_basis_limbs[0].element + (binary_basis_limbs[1].element * shift_1); field_t hi = binary_basis_limbs[2].element + (binary_basis_limbs[3].element * shift_1); // n.b. 
this only works if NUM_LIMB_BITS * 2 is divisible by 8 @@ -285,6 +357,7 @@ template class bigfield { bool check_for_zero); static bigfield div_without_denominator_check(const std::vector& numerators, const bigfield& denominator); + bigfield div_without_denominator_check(const bigfield& denominator); static bigfield div_check_denominator_nonzero(const std::vector& numerators, const bigfield& denominator); bigfield conditional_negate(const bool_t& predicate) const; @@ -392,14 +465,25 @@ template class bigfield { prime_basis_limb.tag); } - static constexpr uint512_t get_maximum_unreduced_value(const size_t num_products = 1) + static constexpr uint512_t get_maximum_unreduced_value() { - // return (uint512_t(1) << 256); - uint1024_t maximum_product = uint1024_t(binary_basis.modulus) * uint1024_t(prime_basis.modulus) / - uint1024_t(static_cast(num_products)); + // This = `T * n = 2^272 * |BN(Fr)|` So this equals n*2^t + + uint1024_t maximum_product = uint1024_t(binary_basis.modulus) * uint1024_t(prime_basis.modulus); // TODO: compute square root (the following is a lower bound, so good for the CRT use) + // We use -1 to stay safer, because it provides additional space to avoid the overflow, but get_msb() by itself + // should be enough + uint64_t maximum_product_bits = maximum_product.get_msb() - 1; + return (uint512_t(1) << (maximum_product_bits >> 1)); + } + + // If we encounter this maximum value of a bigfield we stop execution + static constexpr uint512_t get_prohibited_maximum_value() + { + uint1024_t maximum_product = uint1024_t(binary_basis.modulus) * uint1024_t(prime_basis.modulus); uint64_t maximum_product_bits = maximum_product.get_msb() - 1; - return (uint512_t(1) << (maximum_product_bits >> 1)) - uint512_t(1); + const size_t arbitrary_secure_margin = 20; + return (uint512_t(1) << ((maximum_product_bits >> 1) + arbitrary_secure_margin)) - uint512_t(1); } static constexpr uint1024_t get_maximum_crt_product() @@ -501,11 +585,24 @@ template class bigfield { static constexpr uint64_t MAX_ADDITION_LOG = 10; // the rationale of the expression is we should not overflow Fr when applying any bigfield operation (e.g. *) and // starting with this max limb size - static constexpr uint64_t MAX_UNREDUCED_LIMB_SIZE = (bb::fr::modulus.get_msb() + 1) / 2 - MAX_ADDITION_LOG; - static constexpr uint256_t get_maximum_unreduced_limb_value() { return uint256_t(1) << MAX_UNREDUCED_LIMB_SIZE; } + static constexpr uint64_t MAXIMUM_SIZE_THAT_WOULDNT_OVERFLOW = + (bb::fr::modulus.get_msb() - MAX_ADDITION_LOG - NUM_LIMB_BITS) / 2; + + // If the logarithm of the maximum value of a limb is more than this, we need to reduce + static constexpr uint64_t MAX_UNREDUCED_LIMB_BITS = + NUM_LIMB_BITS + 10; // We allowa an element to be added to itself 10 times. There is no actual usecase - static_assert(MAX_UNREDUCED_LIMB_SIZE < (NUM_LIMB_BITS * 2)); + static constexpr uint64_t PROHIBITED_LIMB_BITS = + MAX_UNREDUCED_LIMB_BITS + + 5; // Shouldn't be reachable through addition, reduction should happen earlier. 
If we detect this, we stop + + static constexpr uint256_t get_maximum_unreduced_limb_value() { return uint256_t(1) << MAX_UNREDUCED_LIMB_BITS; } + + // If we encounter this maximum value of a limb we stop execution + static constexpr uint256_t get_prohibited_maximum_limb_value() { return uint256_t(1) << PROHIBITED_LIMB_BITS; } + + static_assert(PROHIBITED_LIMB_BITS < MAXIMUM_SIZE_THAT_WOULDNT_OVERFLOW); private: static std::pair compute_quotient_remainder_values(const bigfield& a, @@ -560,7 +657,9 @@ template class bigfield { const bigfield& right, const bigfield& quotient, const bigfield& remainder); - void reduction_check(const size_t num_products = 1) const; + void reduction_check() const; + + void sanity_check() const; }; // namespace stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp index b2194698bf8..81949bb40e9 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp @@ -41,6 +41,35 @@ template class stdlib_bigfield : public testing::Test { typedef typename bn254::witness_ct witness_ct; public: + static void test_add_to_lower_limb_regression() + { + auto builder = Builder(); + fq_ct constant = fq_ct(1); + fq_ct var = fq_ct::create_from_u512_as_witness(&builder, 1); + fr_ct small_var = witness_ct(&builder, fr(1)); + fq_ct mixed = fq_ct(1).add_to_lower_limb(small_var, 1); + fq_ct r; + + r = mixed + mixed; + r = mixed - mixed; + r = mixed + var; + r = mixed + constant; + r = mixed - var; + r = mixed - constant; + r = var - mixed; + + r = var * constant; + r = constant / var; + r = constant * constant; + r = constant / constant; + + r = mixed * var; + r = mixed / var; + r = mixed * mixed; + r = mixed * constant; + bool result = CircuitChecker::check(builder); + EXPECT_EQ(result, true); + } // The bug happens when we are applying the CRT formula to a*b < r, which can happen when using the division // operator static void test_division_formula_bug() @@ -254,7 +283,7 @@ template class stdlib_bigfield : public testing::Test { { auto builder = Builder(); size_t num_repetitions = 1; - const size_t number_of_madds = 32; + const size_t number_of_madds = 16; for (size_t i = 0; i < num_repetitions; ++i) { fq mul_left_values[number_of_madds]; fq mul_right_values[number_of_madds]; @@ -973,10 +1002,10 @@ template class stdlib_bigfield : public testing::Test { witness_ct(&builder, fr(uint256_t(inputs[3]).slice(fq_ct::NUM_LIMB_BITS * 2, fq_ct::NUM_LIMB_BITS * 4)))); - fq_ct two(witness_ct(&builder, fr(2)), - witness_ct(&builder, fr(0)), - witness_ct(&builder, fr(0)), - witness_ct(&builder, fr(0))); + fq_ct two = fq_ct::unsafe_construct_from_limbs(witness_ct(&builder, fr(2)), + witness_ct(&builder, fr(0)), + witness_ct(&builder, fr(0)), + witness_ct(&builder, fr(0))); fq_ct t0 = a + a; fq_ct t1 = a * two; @@ -998,31 +1027,219 @@ template class stdlib_bigfield : public testing::Test { fq base_val(engine.get_random_uint256()); uint32_t exponent_val = engine.get_random_uint32(); - fq expected = base_val.pow(exponent_val); + // Set the high bit + exponent_val |= static_cast(1) << 31; + fq_ct base_constant(&builder, static_cast(base_val)); + auto base_witness = fq_ct::from_witness(&builder, static_cast(base_val)); + // This also tests for the case where the exponent is zero + for (size_t i = 0; i <= 32; i += 4) { + auto current_exponent_val = exponent_val >> i; 
+ fq expected = base_val.pow(current_exponent_val); + + // Check for constant bigfield element with constant exponent + fq_ct result_constant_base = base_constant.pow(current_exponent_val); + EXPECT_EQ(fq(result_constant_base.get_value()), expected); + + // Check for witness exponent with constant base + fr_ct witness_exponent = witness_ct(&builder, current_exponent_val); + auto result_witness_exponent = base_constant.pow(witness_exponent); + EXPECT_EQ(fq(result_witness_exponent.get_value()), expected); + + // Check for witness base with constant exponent + fq_ct result_witness_base = base_witness.pow(current_exponent_val); + EXPECT_EQ(fq(result_witness_base.get_value()), expected); + + // Check for all witness + base_witness.set_origin_tag(submitted_value_origin_tag); + witness_exponent.set_origin_tag(challenge_origin_tag); + + fq_ct result_all_witness = base_witness.pow(witness_exponent); + EXPECT_EQ(fq(result_all_witness.get_value()), expected); + EXPECT_EQ(result_all_witness.get_origin_tag(), first_two_merged_tag); + } + + bool check_result = CircuitChecker::check(builder); + EXPECT_EQ(check_result, true); + } + + static void test_pow_one() + { + Builder builder; + + fq base_val(engine.get_random_uint256()); + + uint32_t current_exponent_val = 1; + fq_ct base_constant(&builder, static_cast(base_val)); + auto base_witness = fq_ct::from_witness(&builder, static_cast(base_val)); + fq expected = base_val.pow(current_exponent_val); + + // Check for constant bigfield element with constant exponent + fq_ct result_constant_base = base_constant.pow(current_exponent_val); + EXPECT_EQ(fq(result_constant_base.get_value()), expected); - fq_ct base(&builder, static_cast(base_val)); - fq_ct result = base.pow(exponent_val); - EXPECT_EQ(fq(result.get_value()), expected); + // Check for witness exponent with constant base + fr_ct witness_exponent = witness_ct(&builder, current_exponent_val); + auto result_witness_exponent = base_constant.pow(witness_exponent); + EXPECT_EQ(fq(result_witness_exponent.get_value()), expected); - fr_ct exponent = witness_ct(&builder, exponent_val); - base.set_origin_tag(submitted_value_origin_tag); - exponent.set_origin_tag(challenge_origin_tag); - result = base.pow(exponent); - EXPECT_EQ(fq(result.get_value()), expected); + // Check for witness base with constant exponent + fq_ct result_witness_base = base_witness.pow(current_exponent_val); + EXPECT_EQ(fq(result_witness_base.get_value()), expected); - EXPECT_EQ(result.get_origin_tag(), first_two_merged_tag); + // Check for all witness + fq_ct result_all_witness = base_witness.pow(witness_exponent); + EXPECT_EQ(fq(result_all_witness.get_value()), expected); - info("num gates = ", builder.get_estimated_num_finalized_gates()); bool check_result = CircuitChecker::check(builder); EXPECT_EQ(check_result, true); } + + static void test_nonnormalized_field_bug_regression() + { + auto builder = Builder(); + fr_ct zero = witness_ct::create_constant_witness(&builder, fr::zero()); + uint256_t two_to_68 = uint256_t(1) << fq_ct::NUM_LIMB_BITS; + // construct bigfield where the low limb has a non-trivial `additive_constant` + fq_ct z(zero + two_to_68, zero); + // assert invariant for every limb: actual value <= maximum value + // Failed in the past for for StandardCircuitBuilder + for (auto zi : z.binary_basis_limbs) { + EXPECT_LE(uint256_t(zi.element.get_value()), zi.maximum_value); + } + } + + static void test_msub_div_ctx_crash_regression() + { + auto builder = Builder(); + fq_ct witness_one = 
fq_ct::create_from_u512_as_witness(&builder, uint256_t(1)); + fq_ct constant_one(1); + fq_ct::msub_div({ witness_one }, { witness_one }, constant_one, { witness_one }, true); + bool result = CircuitChecker::check(builder); + EXPECT_EQ(result, true); + } + + static void test_internal_div_regression() + { + typedef stdlib::bool_t bool_t; + auto builder = Builder(); + + fq_ct w0 = fq_ct::from_witness(&builder, 1); + w0 = w0.conditional_negate(bool_t(&builder, true)); + w0 = w0.conditional_negate(bool_t(&builder, false)); + w0 = w0.conditional_negate(bool_t(&builder, true)); + w0 = w0.conditional_negate(bool_t(&builder, true)); + fq_ct w4 = w0.conditional_negate(bool_t(&builder, false)); + w4 = w4.conditional_negate(bool_t(&builder, true)); + w4 = w4.conditional_negate(bool_t(&builder, true)); + fq_ct w5 = w4 - w0; + fq_ct w6 = w5 / 1; + (void)(w6); + EXPECT_TRUE(CircuitChecker::check(builder)); + } + + static void test_internal_div_regression2() + { + auto builder = Builder(); + + fq_ct numerator = fq_ct::create_from_u512_as_witness(&builder, uint256_t(1) << (68 + 67)); + numerator.binary_basis_limbs[0].maximum_value = 0; + numerator.binary_basis_limbs[1].maximum_value = uint256_t(1) << 67; + numerator.binary_basis_limbs[2].maximum_value = 0; + numerator.binary_basis_limbs[3].maximum_value = 0; + + for (size_t i = 0; i < 9; i++) { + numerator = numerator + numerator; + } + fq_ct denominator = fq_ct::create_from_u512_as_witness(&builder, uint256_t(1)); + fq_ct result = numerator / denominator; + (void)(result); + EXPECT_TRUE(CircuitChecker::check(builder)); + } + + static void test_internal_div_regression3() + { + Builder builder; + uint256_t dlimb0_value = uint256_t("0x00000000000000000000000000000000000000000000000bef7fa109038857fc"); + uint256_t dlimb0_max = uint256_t("0x00000000000000000000000000000000000000000000000fffffffffffffffff"); + uint256_t dlimb1_value = uint256_t("0x0000000000000000000000000000000000000000000000056f10535779f56339"); + uint256_t dlimb1_max = uint256_t("0x00000000000000000000000000000000000000000000000fffffffffffffffff"); + uint256_t dlimb2_value = uint256_t("0x00000000000000000000000000000000000000000000000c741f60a1ec4e114e"); + uint256_t dlimb2_max = uint256_t("0x00000000000000000000000000000000000000000000000fffffffffffffffff"); + uint256_t dlimb3_value = uint256_t("0x000000000000000000000000000000000000000000000000000286b3cd344d8b"); + uint256_t dlimb3_max = uint256_t("0x0000000000000000000000000000000000000000000000000003ffffffffffff"); + uint256_t dlimb_prime = uint256_t("0x286b3cd344d8bc741f60a1ec4e114e56f10535779f56339bef7fa109038857fc"); + + uint256_t nlimb0_value = uint256_t("0x00000000000000000000000000000000000000000000080a84d9bea2b012417c"); + uint256_t nlimb0_max = uint256_t("0x000000000000000000000000000000000000000000000ff7c7469df4081b61fc"); + uint256_t nlimb1_value = uint256_t("0x00000000000000000000000000000000000000000000080f50ee84526e8e5ba7"); + uint256_t nlimb1_max = uint256_t("0x000000000000000000000000000000000000000000000ffef965c67ba5d5893c"); + uint256_t nlimb2_value = uint256_t("0x00000000000000000000000000000000000000000000080aba136ca8eaf6dc1b"); + uint256_t nlimb2_max = uint256_t("0x000000000000000000000000000000000000000000000ff8171d22fd607249ea"); + uint256_t nlimb3_value = uint256_t("0x00000000000000000000000000000000000000000000000001f0042419843c29"); + uint256_t nlimb3_max = uint256_t("0x00000000000000000000000000000000000000000000000003e00636264659ff"); + uint256_t nlimb_prime = 
uint256_t("0x000000000000000000000000000000474da776b8ee19a56b08186bdcf01240d8"); + + fq_ct w0 = fq_ct::from_witness(&builder, bb::fq(0)); + w0.binary_basis_limbs[0].element = witness_ct(&builder, dlimb0_value); + w0.binary_basis_limbs[1].element = witness_ct(&builder, dlimb1_value); + w0.binary_basis_limbs[2].element = witness_ct(&builder, dlimb2_value); + w0.binary_basis_limbs[3].element = witness_ct(&builder, dlimb3_value); + w0.binary_basis_limbs[0].maximum_value = dlimb0_max; + w0.binary_basis_limbs[1].maximum_value = dlimb1_max; + w0.binary_basis_limbs[2].maximum_value = dlimb2_max; + w0.binary_basis_limbs[3].maximum_value = dlimb3_max; + w0.prime_basis_limb = witness_ct(&builder, dlimb_prime); + + fq_ct w1 = fq_ct::from_witness(&builder, bb::fq(0)); + w1.binary_basis_limbs[0].element = witness_ct(&builder, nlimb0_value); + w1.binary_basis_limbs[1].element = witness_ct(&builder, nlimb1_value); + w1.binary_basis_limbs[2].element = witness_ct(&builder, nlimb2_value); + w1.binary_basis_limbs[3].element = witness_ct(&builder, nlimb3_value); + w1.binary_basis_limbs[0].maximum_value = nlimb0_max; + w1.binary_basis_limbs[1].maximum_value = nlimb1_max; + w1.binary_basis_limbs[2].maximum_value = nlimb2_max; + w1.binary_basis_limbs[3].maximum_value = nlimb3_max; + w1.prime_basis_limb = witness_ct(&builder, nlimb_prime); + + fq_ct w2 = w1 / w0; + (void)w2; + EXPECT_TRUE(CircuitChecker::check(builder)); + } + static void test_assert_not_equal_regression() + { + auto builder = Builder(); + fq_ct zero = fq_ct::create_from_u512_as_witness(&builder, uint256_t(0)); + fq_ct alsozero = fq_ct::create_from_u512_as_witness(&builder, fq_ct::modulus_u512); + for (size_t i = 0; i < 4; i++) { + zero.binary_basis_limbs[i].maximum_value = zero.binary_basis_limbs[i].element.get_value(); + alsozero.binary_basis_limbs[i].maximum_value = alsozero.binary_basis_limbs[i].element.get_value(); + } + zero.assert_is_not_equal(alsozero); + bool result = CircuitChecker::check(builder); + EXPECT_EQ(result, false); + } }; // Define types for which the above tests will be constructed. using CircuitTypes = testing::Types; // Define the suite of tests. 
TYPED_TEST_SUITE(stdlib_bigfield, CircuitTypes); + +TYPED_TEST(stdlib_bigfield, assert_not_equal_regression) +{ + TestFixture::test_assert_not_equal_regression(); +} + +TYPED_TEST(stdlib_bigfield, add_to_lower_limb_regression) +{ + TestFixture::test_add_to_lower_limb_regression(); +} TYPED_TEST(stdlib_bigfield, badmul) +{ + TestFixture::test_bad_mul(); +} + +TYPED_TEST(stdlib_bigfield, division_formula_regression) { TestFixture::test_division_formula_bug(); } @@ -1070,6 +1287,10 @@ TYPED_TEST(stdlib_bigfield, msub_div) { TestFixture::test_msub_div(); } +TYPED_TEST(stdlib_bigfield, msb_div_ctx_crash_regression) +{ + TestFixture::test_msub_div_ctx_crash_regression(); +} TYPED_TEST(stdlib_bigfield, conditional_negate) { TestFixture::test_conditional_negate(); @@ -1124,6 +1345,22 @@ TYPED_TEST(stdlib_bigfield, pow) TestFixture::test_pow(); } +TYPED_TEST(stdlib_bigfield, pow_one) +{ + TestFixture::test_pow_one(); +} +TYPED_TEST(stdlib_bigfield, nonnormalized_field_bug_regression) +{ + TestFixture::test_nonnormalized_field_bug_regression(); +} + +TYPED_TEST(stdlib_bigfield, internal_div_bug_regression) +{ + TestFixture::test_internal_div_regression(); + TestFixture::test_internal_div_regression2(); + TestFixture::test_internal_div_regression3(); +} + // // This test was disabled before the refactor to use TYPED_TEST's/ // TEST(stdlib_bigfield, DISABLED_test_div_against_constants) // { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield_impl.hpp index 3f4513773b9..bf522e86ff2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield_impl.hpp @@ -1,5 +1,6 @@ #pragma once +#include "barretenberg/common/assert.hpp" #include "barretenberg/common/zip_view.hpp" #include "barretenberg/numeric/uint256/uint256.hpp" #include "barretenberg/numeric/uintx/uintx.hpp" @@ -39,6 +40,7 @@ bigfield::bigfield(const field_t& low_bits_in, const bool can_overflow, const size_t maximum_bitlength) { + ASSERT(low_bits_in.is_constant() == high_bits_in.is_constant()); ASSERT((can_overflow == true && maximum_bitlength == 0) || (can_overflow == false && (maximum_bitlength == 0 || maximum_bitlength > (3 * NUM_LIMB_BITS)))); @@ -51,12 +53,12 @@ bigfield::bigfield(const field_t& low_bits_in, field_t limb_1(context); field_t limb_2(context); field_t limb_3(context); - if (low_bits_in.witness_index != IS_CONSTANT) { + if (!low_bits_in.is_constant()) { std::vector low_accumulator; if constexpr (HasPlookup) { // MERGE NOTE: this was the if constexpr block introduced in ecebe7643 const auto limb_witnesses = - context->decompose_non_native_field_double_width_limb(low_bits_in.normalize().witness_index); + context->decompose_non_native_field_double_width_limb(low_bits_in.get_normalized_witness_index()); limb_0.witness_index = limb_witnesses[0]; limb_1.witness_index = limb_witnesses[1]; field_t::evaluate_linear_identity(low_bits_in, -limb_0, -limb_1 * shift_1, field_t(0)); @@ -73,8 +75,9 @@ bigfield::bigfield(const field_t& low_bits_in, // limb_1 = (low_bits_in - limb_0) * shift_right_1; } else { size_t mid_index; - low_accumulator = context->decompose_into_base4_accumulators( - low_bits_in.witness_index, static_cast(NUM_LIMB_BITS * 2), "bigfield: low_bits_in too large."); + low_accumulator = context->decompose_into_base4_accumulators(low_bits_in.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS * 2), + 
"bigfield: low_bits_in too large."); mid_index = static_cast((NUM_LIMB_BITS / 2) - 1); // Range constraint returns an array of partial sums, midpoint will happen to hold the big limb // value @@ -103,23 +106,23 @@ bigfield::bigfield(const field_t& low_bits_in, } // We create the high limb values similar to the low limb ones above const uint64_t num_high_limb_bits = NUM_LIMB_BITS + num_last_limb_bits; - if (high_bits_in.witness_index != IS_CONSTANT) { + if (!high_bits_in.is_constant()) { std::vector high_accumulator; if constexpr (HasPlookup) { const auto limb_witnesses = context->decompose_non_native_field_double_width_limb( - high_bits_in.normalize().witness_index, (size_t)num_high_limb_bits); + high_bits_in.get_normalized_witness_index(), (size_t)num_high_limb_bits); limb_2.witness_index = limb_witnesses[0]; limb_3.witness_index = limb_witnesses[1]; field_t::evaluate_linear_identity(high_bits_in, -limb_2, -limb_3 * shift_1, field_t(0)); } else { - high_accumulator = context->decompose_into_base4_accumulators(high_bits_in.witness_index, + high_accumulator = context->decompose_into_base4_accumulators(high_bits_in.get_normalized_witness_index(), static_cast(num_high_limb_bits), "bigfield: high_bits_in too large."); if constexpr (!IsSimulator) { - limb_3.witness_index = high_accumulator[static_cast((num_last_limb_bits / 2) - 1)]; + limb_3.witness_index = high_accumulator[static_cast(((num_last_limb_bits + 1) / 2) - 1)]; } limb_2 = (high_bits_in - (limb_3 * shift_1)); } @@ -171,8 +174,8 @@ bigfield::bigfield(bigfield&& other) * @param ctx * @param value * @param can_overflow Can the input value have more than log2(modulus) bits? - * @param maximum_bitlength Provide the explicit maximum bitlength if known. Otherwise bigfield max value will be either - * log2(modulus) bits iff can_overflow = false, or (4 * NUM_LIMB_BITS) iff can_overflow = true + * @param maximum_bitlength Provide the explicit maximum bitlength if known. Otherwise bigfield max value will be + * either log2(modulus) bits iff can_overflow = false, or (4 * NUM_LIMB_BITS) iff can_overflow = true * @return bigfield * * @details This method is 1 gate more efficient than constructing from 2 field_ct elements. @@ -204,19 +207,20 @@ bigfield bigfield::create_from_u512_as_witness(Builder* prime_limb.witness_index = ctx->add_variable(limb_0.get_value() + limb_1.get_value() * shift_1 + limb_2.get_value() * shift_2 + limb_3.get_value() * shift_3); // evaluate prime basis limb with addition gate that taps into the 4th wire in the next gate - ctx->create_big_add_gate({ limb_1.witness_index, - limb_2.witness_index, - limb_3.witness_index, - prime_limb.witness_index, + ctx->create_big_add_gate({ limb_1.get_normalized_witness_index(), + limb_2.get_normalized_witness_index(), + limb_3.get_normalized_witness_index(), + prime_limb.get_normalized_witness_index(), shift_1, shift_2, shift_3, -1, 0 }, true); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/879): dummy necessary for preceeding big add gate + // TODO(https://github.com/AztecProtocol/barretenberg/issues/879): dummy necessary for preceeding big add + // gate ctx->create_dummy_gate( - ctx->blocks.arithmetic, ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, limb_0.witness_index); + ctx->blocks.arithmetic, ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, limb_0.get_normalized_witness_index()); uint64_t num_last_limb_bits = (can_overflow) ? 
NUM_LIMB_BITS : NUM_LAST_LIMB_BITS; @@ -235,10 +239,14 @@ bigfield bigfield::create_from_u512_as_witness(Builder* result.binary_basis_limbs[3].maximum_value = max_limb_value; } result.prime_basis_limb = prime_limb; - ctx->range_constrain_two_limbs( - limb_0.witness_index, limb_1.witness_index, (size_t)NUM_LIMB_BITS, (size_t)NUM_LIMB_BITS); - ctx->range_constrain_two_limbs( - limb_2.witness_index, limb_3.witness_index, (size_t)NUM_LIMB_BITS, (size_t)num_last_limb_bits); + ctx->range_constrain_two_limbs(limb_0.get_normalized_witness_index(), + limb_1.get_normalized_witness_index(), + (size_t)NUM_LIMB_BITS, + (size_t)NUM_LIMB_BITS); + ctx->range_constrain_two_limbs(limb_2.get_normalized_witness_index(), + limb_3.get_normalized_witness_index(), + (size_t)NUM_LIMB_BITS, + (size_t)num_last_limb_bits); return result; } else { @@ -291,7 +299,7 @@ template bigfield::bigfield(const byt const auto [limb0, limb1] = reconstruct_two_limbs(ctx, lo_8_bytes, lolo_8_bytes, lo_split_byte); const auto [limb2, limb3] = reconstruct_two_limbs(ctx, hi_8_bytes, mid_8_bytes, mid_split_byte); - const auto res = bigfield(limb0, limb1, limb2, limb3, true); + const auto res = bigfield::unsafe_construct_from_limbs(limb0, limb1, limb2, limb3, true); const auto num_last_limb_bits = 256 - (NUM_LIMB_BITS * 3); res.binary_basis_limbs[3].maximum_value = (uint64_t(1) << num_last_limb_bits); @@ -340,13 +348,14 @@ template uint512_t bigfield::get_maxi } /** - * @brief Add a field element to the lower limb. CAUTION (the element has to be constrained before using this function) + * @brief Add a field element to the lower limb. CAUTION (the element has to be constrained before using this + * function) * - * @details Sometimes we need to add a small constrained value to a bigfield element (for example, a boolean value), but - * we don't want to construct a full bigfield element for that as it would take too many gates. If the maximum value of - * the field element being added is small enough, we can simply add it to the lowest limb and increase its maximum - * value. That will create 2 additional constraints instead of 5/3 needed to add 2 bigfield elements and several needed - * to construct a bigfield element. + * @details Sometimes we need to add a small constrained value to a bigfield element (for example, a boolean value), + * but we don't want to construct a full bigfield element for that as it would take too many gates. If the maximum + * value of the field element being added is small enough, we can simply add it to the lowest limb and increase its + * maximum value. That will create 2 additional constraints instead of 5/3 needed to add 2 bigfield elements and + * several needed to construct a bigfield element. 
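A rough sketch of the shortcut described in the @details above: a small, already range-constrained value is folded into limb 0 and that limb's tracked maximum grows accordingly, instead of building a whole bigfield element for it. Toy 16-bit limbs and invented names again; the real method also handles the constant-element case and the prime-basis limb, which this sketch ignores:

#include <cassert>
#include <cstdint>

// Toy model: 4 limbs of 16 bits (the real bigfield uses 68-bit limbs).
struct ToyLimb {
    uint64_t value;
    uint64_t maximum_value;
};

struct ToyBigfield {
    ToyLimb limbs[4];
};

// Add a small, already range-constrained value to the lowest limb: the value
// goes into limb 0 and the tracked maximum grows by the value's own maximum,
// instead of paying for a full bigfield addition.
ToyBigfield add_to_lower_limb(const ToyBigfield& a, uint64_t other, uint64_t other_maximum)
{
    assert(other <= other_maximum);
    ToyBigfield result = a;
    result.limbs[0].value += other;
    result.limbs[0].maximum_value += other_maximum;
    return result;
}

int main()
{
    ToyBigfield x{ { { 7, 0xffff }, { 0, 0xffff }, { 0, 0xffff }, { 0, 0x3fff } } };
    ToyBigfield y = add_to_lower_limb(x, /*other=*/1, /*other_maximum=*/1); // e.g. a boolean
    return (y.limbs[0].value == 8 && y.limbs[0].maximum_value == 0x10000) ? 0 : 1;
}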
* * @tparam Builder Builder * @tparam T Field Parameters @@ -359,14 +368,33 @@ bigfield bigfield::add_to_lower_limb(const field_t::from_witness(context, binary_basis_limbs[i].element.get_value()), + binary_basis_limbs[i].maximum_value); + // Ensure it is fixed + result.binary_basis_limbs[i].element.fix_witness(); + result.context = ctx; + } + } else { + + // if this element is a witness, then all limbs will be witnesses + result = *this; + } result.binary_basis_limbs[0].maximum_value = binary_basis_limbs[0].maximum_value + other_maximum_value; result.binary_basis_limbs[0].element = binary_basis_limbs[0].element + other; @@ -411,7 +439,11 @@ bigfield bigfield::operator+(const bigfield& other) cons limbconst = limbconst || other.binary_basis_limbs[2].element.is_constant(); limbconst = limbconst || other.binary_basis_limbs[3].element.is_constant(); limbconst = limbconst || other.prime_basis_limb.is_constant(); - limbconst = limbconst || (prime_basis_limb.witness_index == other.prime_basis_limb.witness_index); + limbconst = + limbconst || (prime_basis_limb.get_witness_index() == + other.prime_basis_limb + .get_witness_index()); // We are comparing if the bigfield elements are exactly the + // same object, so we compare the unnormalized witness indices if (!limbconst) { std::pair x0{ binary_basis_limbs[0].element.witness_index, binary_basis_limbs[0].element.multiplicative_constant }; @@ -441,7 +473,6 @@ bigfield bigfield::operator+(const bigfield& other) cons uint32_t xp(prime_basis_limb.witness_index); uint32_t yp(other.prime_basis_limb.witness_index); bb::fr cp(prime_basis_limb.additive_constant + other.prime_basis_limb.additive_constant); - const auto output_witnesses = ctx->evaluate_non_native_field_addition( { x0, y0, c0 }, { x1, y1, c1 }, { x2, y2, c2 }, { x3, y3, c3 }, { xp, yp, cp }); result.binary_basis_limbs[0].element = field_t::from_witness_index(ctx, output_witnesses[0]); @@ -572,7 +603,8 @@ bigfield bigfield::operator-(const bigfield& other) cons * Step 3: Compute offset terms t0, t1, t2, t3 that we add to our result to ensure each limb is positive * * t3 represents the value we are BORROWING from constant_to_add.limb[3] - * t2, t1, t0 are the terms we will ADD to constant_to_add.limb[2], constant_to_add.limb[1], constant_to_add.limb[0] + * t2, t1, t0 are the terms we will ADD to constant_to_add.limb[2], constant_to_add.limb[1], + *constant_to_add.limb[0] * * i.e. The net value we add to `constant_to_add` is 0. 
We must ensure that: * t3 = t0 + (t1 << NUM_LIMB_BITS) + (t2 << NUM_LIMB_BITS * 2) @@ -587,8 +619,8 @@ bigfield bigfield::operator-(const bigfield& other) cons uint256_t t3(uint256_t(1) << (limb_2_borrow_shift - NUM_LIMB_BITS)); /** - * Compute the limbs of `constant_to_add`, including our offset terms t0, t1, t2, t3 that ensure each result limb is - *positive + * Compute the limbs of `constant_to_add`, including our offset terms t0, t1, t2, t3 that ensure each result + *limb is positive **/ uint256_t to_add_0 = uint256_t(constant_to_add.slice(0, NUM_LIMB_BITS)) + t0; uint256_t to_add_1 = uint256_t(constant_to_add.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2)) + t1; @@ -624,7 +656,11 @@ bigfield bigfield::operator-(const bigfield& other) cons limbconst = limbconst || other.binary_basis_limbs[2].element.is_constant(); limbconst = limbconst || other.binary_basis_limbs[3].element.is_constant(); limbconst = limbconst || other.prime_basis_limb.is_constant(); - limbconst = limbconst || (prime_basis_limb.witness_index == other.prime_basis_limb.witness_index); + limbconst = + limbconst || + (prime_basis_limb.witness_index == + other.prime_basis_limb.witness_index); // We are checking if this is and identical element, so we + // need to compare the actual indices, not normalized ones if (!limbconst) { std::pair x0{ result.binary_basis_limbs[0].element.witness_index, binary_basis_limbs[0].element.multiplicative_constant }; @@ -713,10 +749,11 @@ bigfield bigfield::operator*(const bigfield& other) cons return remainder; } else { // when writing a*b = q*p + r we wish to enforce r<2^s for smallest s such that p<2^s - // hence the second constructor call is with can_overflow=false. This will allow using r in more additions mod - // 2^t without needing to apply the mod, where t=4*NUM_LIMB_BITS + // hence the second constructor call is with can_overflow=false. This will allow using r in more additions + // mod 2^t without needing to apply the mod, where t=4*NUM_LIMB_BITS - // Check if the product overflows CRT or the quotient can't be contained in a range proof and reduce accordingly + // Check if the product overflows CRT or the quotient can't be contained in a range proof and reduce + // accordingly auto [reduction_required, num_quotient_bits] = get_quotient_reduction_info({ get_maximum_value() }, { other.get_maximum_value() }, {}); if (reduction_required) { @@ -739,8 +776,8 @@ bigfield bigfield::operator*(const bigfield& other) cons } /** - * Division operator. Doesn't create constraints for b!=0, which can lead to vulnerabilities. If you need a safer - *variant use div_check_denominator_nonzero. + * Division operator. Create constraints for b!=0 by default. If you need a variant + *without the zero check, use div_without_denominator_check. * * To evaluate (a / b = c mod p), we instead evaluate (c * b = a mod p). **/ @@ -748,7 +785,7 @@ template bigfield bigfield::operator/(const bigfield& other) const { - return internal_div({ *this }, other, false); + return internal_div({ *this }, other, true); } /** * @brief Create constraints for summing these terms @@ -812,7 +849,11 @@ bigfield bigfield::internal_div(const std::vector bigfield::div_without_denominator_check(const s return internal_div(numerators, denominator, false); } +template +bigfield bigfield::div_without_denominator_check(const bigfield& denominator) +{ + return internal_div({ *this }, denominator, false); +} + /** * Div method with constraints for denominator!=0. 
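With this change operator/ enforces the denominator check by default, and division is still verified multiplicatively: to claim a / b = c mod p, the circuit constrains c * b = a mod p with c supplied as a hint. A small native-integer sketch of that pattern over a toy prime (divide_checked and invmod are illustrative names; the inverse here simply stands in for the prover-side witness):

#include <cassert>
#include <cstdint>

constexpr uint64_t P = 65537; // small prime standing in for the target modulus

uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }

// Fermat inverse: b^(P-2) mod P. This plays the role of the prover-side hint.
uint64_t invmod(uint64_t b)
{
    uint64_t result = 1, base = b % P, e = P - 2;
    while (e != 0) {
        if (e & 1) { result = mulmod(result, base); }
        base = mulmod(base, base);
        e >>= 1;
    }
    return result;
}

// "Division" as it is enforced in-circuit: assert the denominator is nonzero,
// obtain c as a hint, then constrain c * b == a (mod P) instead of dividing.
uint64_t divide_checked(uint64_t a, uint64_t b)
{
    assert(b % P != 0);                // the denominator-nonzero check, now on by default
    uint64_t c = mulmod(a, invmod(b)); // prover-supplied result of the division
    assert(mulmod(c, b) == a % P);     // the actual constraint: c * b = a mod P
    return c;
}

int main()
{
    uint64_t c = divide_checked(1234, 99);
    return mulmod(c, 99) == 1234 ? 0 : 1;
}

Callers that genuinely want the old unchecked behaviour now have to opt in via div_without_denominator_check.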
* @@ -929,6 +976,7 @@ template bigfield bigfield bigfield bigfield::sqradd(const std::vector& to_add) const { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); reduction_check(); Builder* ctx = context; @@ -993,36 +1041,62 @@ bigfield bigfield::sqradd(const std::vector& t /** * @brief Raise a bigfield to a power of an exponent. Note that the exponent must not exceed 32 bits and is - * implicitly range constrained. The exponent is turned into a field_t witness for the underlying pow method - * to work. + * implicitly range constrained. * * @returns this ** (exponent) * * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1014) Improve the efficiency of this function. - * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1015) Security of this (as part of the whole class) + * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1015) Security of this (as part of the whole + * class) */ template bigfield bigfield::pow(const size_t exponent) const { - auto* ctx = get_context() ? get_context() : nullptr; + // Just return one immediately + + if (exponent == 0) { + return bigfield(uint256_t(1)); + } + + bool accumulator_initialized = false; + bigfield accumulator; + bigfield running_power = *this; + auto shifted_exponent = exponent; - return pow(witness_t(ctx, exponent)); + // Square and multiply + while (shifted_exponent != 0) { + if (shifted_exponent & 1) { + if (!accumulator_initialized) { + accumulator = running_power; + accumulator_initialized = true; + } else { + accumulator *= running_power; + } + } + if (shifted_exponent != 0) { + running_power = running_power.sqr(); + } + shifted_exponent >>= 1; + } + return accumulator; } /** - * @brief Raise a bigfield to a power of an exponent (field_t) that must be a witness. Note that the exponent must not - * exceed 32 bits and is implicitly range constrained. + * @brief Raise a bigfield to a power of an exponent (field_t) that must be a witness. Note that the exponent must + * not exceed 32 bits and is implicitly range constrained. * * @returns this ** (exponent) * * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1014) Improve the efficiency of this function. - * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1015) Security of this (as part of the whole class) + * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/1015) Security of this (as part of the whole + * class) */ template bigfield bigfield::pow(const field_t& exponent) const { auto* ctx = get_context() ? get_context() : exponent.get_context(); uint256_t exponent_value = exponent.get_value(); + if constexpr (IsSimulator) { if ((exponent_value >> 32) != static_cast(0)) { ctx->failure("field_t::pow exponent accumulator incorrect"); @@ -1033,34 +1107,37 @@ bigfield bigfield::pow(const field_t& exponent) return result; } - bool exponent_constant = exponent.is_constant(); + ASSERT(exponent_value.get_msb() < 32); + // Use the constant version that perfoms only the necessary multiplications if the exponent is constant + if (exponent.is_constant()) { + return this->pow(static_cast(exponent_value)); + } std::vector> exponent_bits(32); + // Collect individual bits as bool_t's for (size_t i = 0; i < exponent_bits.size(); ++i) { uint256_t value_bit = exponent_value & 1; bool_t bit; - bit = exponent_constant ? 
bool_t(ctx, value_bit.data[0]) : witness_t(ctx, value_bit.data[0]); + bit = bool_t(witness_t(ctx, value_bit.data[0])); exponent_bits[31 - i] = (bit); exponent_value >>= 1; } - if (!exponent_constant) { - field_t exponent_accumulator(ctx, 0); - for (const auto& bit : exponent_bits) { - exponent_accumulator += exponent_accumulator; - exponent_accumulator += bit; - } - exponent.assert_equal(exponent_accumulator, "field_t::pow exponent accumulator incorrect"); + field_t exponent_accumulator(ctx, 0); + + // Reconstruct the exponent from bits + for (const auto& bit : exponent_bits) { + exponent_accumulator += exponent_accumulator; + exponent_accumulator += field_t(bit); } + + // Ensure it's equal to the original + exponent.assert_equal(exponent_accumulator, "field_t::pow exponent accumulator incorrect"); bigfield accumulator(ctx, 1); - bigfield mul_coefficient = *this - 1; + bigfield one(1); + // Compute the power with a square-and-multiply algorithm for (size_t digit_idx = 0; digit_idx < 32; ++digit_idx) { accumulator *= accumulator; - const bigfield bit(field_t(exponent_bits[digit_idx]), - field_t(witness_t(ctx, 0)), - field_t(witness_t(ctx, 0)), - field_t(witness_t(ctx, 0)), - /*can_overflow=*/true); - accumulator *= (mul_coefficient * bit + 1); + accumulator *= one.conditional_select(*this, exponent_bits[digit_idx]); } accumulator.self_reduce(); accumulator.set_origin_tag(OriginTag(get_origin_tag(), exponent.tag)); @@ -1078,6 +1155,7 @@ bigfield bigfield::pow(const field_t& exponent) template bigfield bigfield::madd(const bigfield& to_mul, const std::vector& to_add) const { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); Builder* ctx = context ? context : to_mul.context; reduction_check(); to_mul.reduction_check(); @@ -1147,6 +1225,10 @@ void bigfield::perform_reductions_for_mult_madd(std::vector& mul_right, const std::vector& to_add) { + ASSERT(mul_left.size() == mul_right.size()); + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(mul_left.size() <= MAXIMUM_SUMMAND_COUNT); + const size_t number_of_products = mul_left.size(); // Get the maximum values of elements std::vector max_values_left; @@ -1221,19 +1303,20 @@ void bigfield::perform_reductions_for_mult_madd(std::vector& left_element, std::tuple& right_element) { return std::get<0>(left_element) > std::get<0>(right_element); }; - // Sort the vector, larger values first - std::sort(maximum_value_updates.begin(), maximum_value_updates.end(), compare_update_tuples); // Now we loop through, reducing 1 element each time. 
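The reworked pow above uses plain square-and-multiply for a constant exponent and, for a witness exponent, splits it into 32 bits, re-accumulates the bits and asserts they equal the original, then multiplies by either 1 or the base per bit via conditional_select. A native-integer sketch of that witness-style path over a toy modulus (pow_via_bits is an invented name; no gates or range constraints here, just the control flow):

#include <cassert>
#include <cstdint>

constexpr uint64_t P = 65537; // toy modulus

uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }

// Mirrors the witness-exponent path: split the exponent into 32 bits, rebuild it
// from those bits (in-circuit this pins the witness), then run MSB-first
// square-and-multiply where each step multiplies by either 1 or the base.
uint64_t pow_via_bits(uint64_t base, uint32_t exponent)
{
    bool bits[32];
    uint32_t v = exponent;
    for (size_t i = 0; i < 32; ++i) {
        bits[31 - i] = (v & 1) != 0; // bits[0] ends up holding the MSB
        v >>= 1;
    }

    // Reconstruct the exponent from its bits, as the circuit does before
    // asserting it equals the original witness.
    uint32_t reconstructed = 0;
    for (bool bit : bits) {
        reconstructed = reconstructed + reconstructed + (bit ? 1 : 0);
    }
    assert(reconstructed == exponent);

    uint64_t accumulator = 1;
    for (bool bit : bits) {
        accumulator = mulmod(accumulator, accumulator);        // square
        accumulator = mulmod(accumulator, bit ? base % P : 1); // conditional select
    }
    return accumulator;
}

int main()
{
    // 5^13 mod 65537 computed naively for comparison.
    uint64_t expected = 1;
    for (int i = 0; i < 13; ++i) { expected = mulmod(expected, 5); }
    return pow_via_bits(5, 13) == expected ? 0 : 1;
}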
This is costly in code, but allows us to use fewer // gates while (reduction_required) { + // Compute the possible reduction updates + compute_updates(maximum_value_updates, mul_left, mul_right, number_of_products); + + // Sort the vector, larger values first + std::sort(maximum_value_updates.begin(), maximum_value_updates.end(), compare_update_tuples); // We choose the largest update auto [update_size, largest_update_product_index, multiplicand_index] = maximum_value_updates[0]; @@ -1247,19 +1330,14 @@ void bigfield::perform_reductions_for_mult_madd(std::vector( - get_quotient_reduction_info(max_values_left, max_values_right, to_add, { DEFAULT_MAXIMUM_REMAINDER })); - - compute_updates(maximum_value_updates, mul_left, mul_right, number_of_products); - for (size_t i = 0; i < number_of_products; i++) { max_values_left[i] = mul_left[i].get_maximum_value(); max_values_right[i] = mul_right[i].get_maximum_value(); } - - // Sort the vector - std::sort(maximum_value_updates.begin(), maximum_value_updates.end(), compare_update_tuples); + reduction_required = std::get<0>( + get_quotient_reduction_info(max_values_left, max_values_right, to_add, { DEFAULT_MAXIMUM_REMAINDER })); } + // Now we have reduced everything exactly to the point of no overflow. There is probably a way to use even // fewer reductions, but for now this will suffice. } @@ -1281,6 +1359,8 @@ bigfield bigfield::mult_madd(const std::vector bool fix_remainder_to_zero) { ASSERT(mul_left.size() == mul_right.size()); + ASSERT(mul_left.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); std::vector mutable_mul_left(mul_left); std::vector mutable_mul_right(mul_right); @@ -1381,7 +1461,8 @@ bigfield bigfield::mult_madd(const std::vector } } - // Now that we know that there is at least 1 non-constant multiplication, we can start estimating reductions, etc + // Now that we know that there is at least 1 non-constant multiplication, we can start estimating reductions, + // etc // Compute the constant term we're adding const auto [_, constant_part_remainder_1024] = (sum_of_constant_products + add_right_constant_sum).divmod(modulus); @@ -1409,8 +1490,8 @@ bigfield bigfield::mult_madd(const std::vector // Check that we can actually reduce the products enough, this assert will probably never get triggered ASSERT((worst_case_product_sum + add_right_maximum) < get_maximum_crt_product()); - // We've collapsed all constants, checked if we can compute the sum of products in the worst case, time to check if - // we need to reduce something + // We've collapsed all constants, checked if we can compute the sum of products in the worst case, time to check + // if we need to reduce something perform_reductions_for_mult_madd(new_input_left, new_input_right, new_to_add); uint1024_t sum_of_products_final(0); for (size_t i = 0; i < final_number_of_products; i++) { @@ -1464,6 +1545,7 @@ bigfield bigfield::dual_madd(const bigfield& left_a, const bigfield& right_b, const std::vector& to_add) { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); left_a.reduction_check(); right_a.reduction_check(); left_b.reduction_check(); @@ -1500,12 +1582,6 @@ bigfield bigfield::msub_div(const std::vector& const std::vector& to_sub, bool enable_divisor_nz_check) { - Builder* ctx = divisor.context; - - const size_t num_multiplications = mul_left.size(); - native product_native = 0; - bool products_constant = true; - // Check the basics ASSERT(mul_left.size() == mul_right.size()); ASSERT(divisor.get_value() != 0); @@ -1517,6 +1593,35 @@ bigfield 
bigfield::msub_div(const std::vector& for (auto& element : to_sub) { new_tag = OriginTag(new_tag, element.get_origin_tag()); } + // Gett he context + Builder* ctx = divisor.context; + if (ctx == NULL) { + for (auto& el : mul_left) { + if (el.context != NULL) { + ctx = el.context; + break; + } + } + } + if (ctx == NULL) { + for (auto& el : mul_right) { + if (el.context != NULL) { + ctx = el.context; + break; + } + } + } + if (ctx == NULL) { + for (auto& el : to_sub) { + if (el.context != NULL) { + ctx = el.context; + break; + } + } + } + const size_t num_multiplications = mul_left.size(); + native product_native = 0; + bool products_constant = true; // This check is optional, because it is heavy and often we don't need it at all if (enable_divisor_nz_check) { @@ -1550,8 +1655,10 @@ bigfield bigfield::msub_div(const std::vector& if (sub_constant && products_constant && divisor.is_constant()) { auto result = bigfield(ctx, uint256_t(result_value.lo.lo)); result.set_origin_tag(new_tag); + return result; } + ASSERT(ctx != NULL); // Create the result witness bigfield result = create_from_u512_as_witness(ctx, result_value.lo); @@ -1578,6 +1685,7 @@ bigfield bigfield::conditional_negate(const bool_t bigfield::conditional_negate(const bool_t>(predicate).madd(-(binary_basis_limbs[3].element * two) + to_add_3, binary_basis_limbs[3].element); - uint256_t max_limb_0 = binary_basis_limbs[0].maximum_value + to_add_0_u256 + t0; - uint256_t max_limb_1 = binary_basis_limbs[1].maximum_value + to_add_1_u256 + t1; - uint256_t max_limb_2 = binary_basis_limbs[2].maximum_value + to_add_2_u256 + t2; - uint256_t max_limb_3 = binary_basis_limbs[3].maximum_value + to_add_3_u256 - t3; + uint256_t maximum_negated_limb_0 = to_add_0_u256 + t0; + uint256_t maximum_negated_limb_1 = to_add_1_u256 + t1; + uint256_t maximum_negated_limb_2 = to_add_2_u256 + t2; + uint256_t maximum_negated_limb_3 = to_add_3_u256; + + uint256_t max_limb_0 = binary_basis_limbs[0].maximum_value > maximum_negated_limb_0 + ? binary_basis_limbs[0].maximum_value + : maximum_negated_limb_0; + uint256_t max_limb_1 = binary_basis_limbs[1].maximum_value > maximum_negated_limb_1 + ? binary_basis_limbs[1].maximum_value + : maximum_negated_limb_1; + uint256_t max_limb_2 = binary_basis_limbs[2].maximum_value > maximum_negated_limb_2 + ? binary_basis_limbs[2].maximum_value + : maximum_negated_limb_2; + uint256_t max_limb_3 = binary_basis_limbs[3].maximum_value > maximum_negated_limb_3 + ? binary_basis_limbs[3].maximum_value + : maximum_negated_limb_3; bigfield result(ctx); result.binary_basis_limbs[0] = Limb(limb_0, max_limb_0); @@ -1717,7 +1838,8 @@ bigfield bigfield::conditional_select(const bigfield& ot * This allows us to evaluate `operator==` using only 1 bigfield multiplication operation. * We can check the product equals 0 or 1 by directly evaluating the binary basis/prime basis limbs of Y. * i.e. if `r == 1` then `(a - b)*X` should have 0 for all limb values - * if `r == 0` then `(a - b)*X` should have 1 in the least significant binary basis limb and 0 elsewhere + * if `r == 0` then `(a - b)*X` should have 1 in the least significant binary basis limb and 0 + * elsewhere * @tparam Builder * @tparam T * @param other @@ -1732,15 +1854,15 @@ template bool_t bigfield::op if (!ctx) { // TODO(https://github.com/AztecProtocol/barretenberg/issues/660): null context _should_ mean that both are // constant, but we check with an assertion to be sure. 
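The operator== comment above relies on a classic trick: with X supplied by the prover as the inverse of a - b (or anything when a = b), a single product (a - b) * X must come out as 0 when the claimed result r is 1 and as 1 when r is 0. A native sketch over a toy prime, with the hints computed out-of-circuit (is_equal, invmod and submod are illustrative helpers, not the class API):

#include <cassert>
#include <cstdint>

constexpr uint64_t P = 65537; // toy modulus

uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }
uint64_t submod(uint64_t a, uint64_t b) { return (a + P - (b % P)) % P; }

uint64_t invmod(uint64_t b) // Fermat inverse, used only to build the hint
{
    uint64_t result = 1, base = b % P, e = P - 2;
    while (e != 0) {
        if (e & 1) { result = mulmod(result, base); }
        base = mulmod(base, base);
        e >>= 1;
    }
    return result;
}

// Returns r = (a == b mod P), enforced with one product:
//   if r == 1, (a - b) * X must be 0;
//   if r == 0, (a - b) * X must be 1 (X is the prover-supplied inverse of a - b).
bool is_equal(uint64_t a, uint64_t b)
{
    uint64_t diff = submod(a, b);
    bool r = (diff == 0);              // prover-side hint
    uint64_t X = r ? 0 : invmod(diff); // prover-side hint
    uint64_t product = mulmod(diff, X);
    assert(product == (r ? 0u : 1u));  // the single multiplicative check
    return r;
}

int main()
{
    return (is_equal(42, 42) && !is_equal(42, 43)) ? 0 : 1;
}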
- ASSERT(is_constant() == other.is_constant()); + ASSERT(is_constant() && other.is_constant()); return is_equal_raw; } bool_t is_equal = witness_t(ctx, is_equal_raw); bigfield diff = (*this) - other; - // TODO(https://github.com/AztecProtocol/barretenberg/issues/999): get native values efficiently (i.e. if u512 value - // fits in a u256, subtract off modulus until u256 fits into finite field) + // TODO(https://github.com/AztecProtocol/barretenberg/issues/999): get native values efficiently (i.e. if u512 + // value fits in a u256, subtract off modulus until u256 fits into finite field) native diff_native = native((diff.get_value() % modulus_u512).lo); native inverse_native = is_equal_raw ? 0 : diff_native.invert(); @@ -1772,10 +1894,8 @@ template bool_t bigfield::op * This prevents our field arithmetic from overflowing the native modulus boundary, whilst ensuring we can * still use the chinese remainder theorem to validate field multiplications with a reduced number of range checks * - * @param num_products The number of products a*b in the parent function that calls the reduction check. Needed to - *limit overflow **/ -template void bigfield::reduction_check(const size_t num_products) const +template void bigfield::reduction_check() const { if (is_constant()) { // this seems not a reduction check, but actually computing the reduction @@ -1807,12 +1927,32 @@ template void bigfield::reduction_che bool limb_overflow_test_1 = binary_basis_limbs[1].maximum_value > maximum_limb_value; bool limb_overflow_test_2 = binary_basis_limbs[2].maximum_value > maximum_limb_value; bool limb_overflow_test_3 = binary_basis_limbs[3].maximum_value > maximum_limb_value; - if (get_maximum_value() > get_maximum_unreduced_value(num_products) || limb_overflow_test_0 || - limb_overflow_test_1 || limb_overflow_test_2 || limb_overflow_test_3) { + if (get_maximum_value() > get_maximum_unreduced_value() || limb_overflow_test_0 || limb_overflow_test_1 || + limb_overflow_test_2 || limb_overflow_test_3) { self_reduce(); } } +/** + * SANITY CHECK on a value that is about to interact with another value + * + * @details ASSERTs that the value of all limbs is less than or equal to the prohibited maximum value. Checks that the + *maximum value of the whole element is also less than a prohibited maximum value + * + **/ +template void bigfield::sanity_check() const +{ + + uint256_t maximum_limb_value = get_prohibited_maximum_limb_value(); + bool limb_overflow_test_0 = binary_basis_limbs[0].maximum_value > maximum_limb_value; + bool limb_overflow_test_1 = binary_basis_limbs[1].maximum_value > maximum_limb_value; + bool limb_overflow_test_2 = binary_basis_limbs[2].maximum_value > maximum_limb_value; + bool limb_overflow_test_3 = binary_basis_limbs[3].maximum_value > maximum_limb_value; + ASSERT(!(get_maximum_value() > get_prohibited_maximum_value() || limb_overflow_test_0 || limb_overflow_test_1 || + limb_overflow_test_2 || limb_overflow_test_3)); +} + +// Underneath performs assert_less_than(modulus) // create a version with mod 2^t element part in [0,p-1] // After reducing to size 2^s, we check (p-1)-a is non-negative as integer. // We perform subtraction using carries on blocks of size 2^b. The operations inside the blocks are done mod r @@ -1821,70 +1961,11 @@ template void bigfield::reduction_che // non-negative at the end of subtraction, we know the subtraction result is positive as integers and a

void bigfield::assert_is_in_field() const { - // Warning: this assumes we have run circuit construction at least once in debug mode where large non reduced - // constants are allowed via ASSERT - if (is_constant()) { - return; - } - - self_reduce(); // this method in particular enforces limb vals are <2^b - needed for logic described above - uint256_t value = get_value().lo; - // TODO:make formal assert that modulus<=256 bits - constexpr uint256_t modulus_minus_one = modulus_u512.lo - 1; - - constexpr uint256_t modulus_minus_one_0 = modulus_minus_one.slice(0, NUM_LIMB_BITS); - constexpr uint256_t modulus_minus_one_1 = modulus_minus_one.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2); - constexpr uint256_t modulus_minus_one_2 = modulus_minus_one.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3); - constexpr uint256_t modulus_minus_one_3 = modulus_minus_one.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4); - - bool borrow_0_value = value.slice(0, NUM_LIMB_BITS) > modulus_minus_one_0; - bool borrow_1_value = - (value.slice(NUM_LIMB_BITS, NUM_LIMB_BITS * 2) + uint256_t(borrow_0_value)) > (modulus_minus_one_1); - bool borrow_2_value = - (value.slice(NUM_LIMB_BITS * 2, NUM_LIMB_BITS * 3) + uint256_t(borrow_1_value)) > (modulus_minus_one_2); - - field_t modulus_0(context, modulus_minus_one_0); - field_t modulus_1(context, modulus_minus_one_1); - field_t modulus_2(context, modulus_minus_one_2); - field_t modulus_3(context, modulus_minus_one_3); - bool_t borrow_0(witness_t(context, borrow_0_value)); - bool_t borrow_1(witness_t(context, borrow_1_value)); - bool_t borrow_2(witness_t(context, borrow_2_value)); - // The way we use borrows here ensures that we are checking that modulus - binary_basis > 0. - // We check that the result in each limb is > 0. - // If the modulus part in this limb is smaller, we simply borrow the value from the higher limb. - // The prover can rearrange the borrows the way they like. The important thing is that the borrows are - // constrained. 
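The borrow argument spelled out in the removed comment above (and reused by assert_less_than below, which assert_is_in_field now simply calls with the modulus) is ordinary schoolbook subtraction: subtract limb by limb, let the prover choose borrow bits, and range-constrain every per-limb result so the overall difference is provably non-negative. A standalone sketch with toy 16-bit limbs and an invented helper name, checking the ranges with plain comparisons instead of decompose_* gates:

#include <cassert>
#include <cstdint>

// Toy parameters: 4 limbs of 16 bits (the real code uses 68-bit limbs and
// enforces the per-limb ranges with range-constraint gates).
constexpr unsigned LIMB_BITS = 16;
constexpr int64_t SHIFT = int64_t(1) << LIMB_BITS;

// Check value <= limit by subtracting limb-wise with prover-chosen borrows and
// requiring every per-limb result to stay within LIMB_BITS bits.
bool leq_via_borrows(const uint64_t value[4], const uint64_t limit[4])
{
    // Prover-side borrow hints, mirroring how the circuit computes borrow_i_value.
    bool borrow_0 = value[0] > limit[0];
    bool borrow_1 = (value[1] + (borrow_0 ? 1 : 0)) > limit[1];
    bool borrow_2 = (value[2] + (borrow_1 ? 1 : 0)) > limit[2];

    int64_t r0 = int64_t(limit[0]) - int64_t(value[0]) + (borrow_0 ? SHIFT : 0);
    int64_t r1 = int64_t(limit[1]) - int64_t(value[1]) + (borrow_1 ? SHIFT : 0) - (borrow_0 ? 1 : 0);
    int64_t r2 = int64_t(limit[2]) - int64_t(value[2]) + (borrow_2 ? SHIFT : 0) - (borrow_1 ? 1 : 0);
    int64_t r3 = int64_t(limit[3]) - int64_t(value[3]) - (borrow_2 ? 1 : 0);

    // In-circuit these are range constraints; here we just test them.
    auto in_range = [](int64_t r) { return r >= 0 && r < SHIFT; };
    return in_range(r0) && in_range(r1) && in_range(r2) && in_range(r3);
}

int main()
{
    uint64_t small[4] = { 0xffff, 0x0000, 0x0001, 0x0000 }; // 2^32 + 0xffff
    uint64_t big[4] = { 0x0000, 0x0000, 0x0000, 0x0001 };   // 2^48
    assert(leq_via_borrows(small, big));
    assert(!leq_via_borrows(big, small));
    return 0;
}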
- field_t r0 = modulus_0 - binary_basis_limbs[0].element + static_cast>(borrow_0) * shift_1; - field_t r1 = modulus_1 - binary_basis_limbs[1].element + - static_cast>(borrow_1) * shift_1 - static_cast>(borrow_0); - field_t r2 = modulus_2 - binary_basis_limbs[2].element + - static_cast>(borrow_2) * shift_1 - static_cast>(borrow_1); - field_t r3 = modulus_3 - binary_basis_limbs[3].element - static_cast>(borrow_2); - r0 = r0.normalize(); - r1 = r1.normalize(); - r2 = r2.normalize(); - r3 = r3.normalize(); - if constexpr (HasPlookup) { - context->decompose_into_default_range(r0.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r1.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r2.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r3.witness_index, static_cast(NUM_LIMB_BITS)); - } else { - context->decompose_into_base4_accumulators( - r0.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_is_in_field range constraint 1."); - context->decompose_into_base4_accumulators( - r1.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_is_in_field range constraint 2."); - context->decompose_into_base4_accumulators( - r2.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_is_in_field range constraint 3."); - context->decompose_into_base4_accumulators( - r3.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_is_in_field range constraint 4."); - } + assert_less_than(modulus); } template void bigfield::assert_less_than(const uint256_t upper_limit) const { - // TODO(kesha): Merge this with assert_is_in_field // Warning: this assumes we have run circuit construction at least once in debug mode where large non reduced // constants are allowed via ASSERT if constexpr (IsSimulator) { @@ -1900,8 +1981,8 @@ template void bigfield::assert_less_t } ASSERT(upper_limit != 0); - // The circuit checks that limit - this >= 0, so if we are doing a less_than comparison, we need to subtract 1 from - // the limit + // The circuit checks that limit - this >= 0, so if we are doing a less_than comparison, we need to subtract 1 + // from the limit uint256_t strict_upper_limit = upper_limit - uint256_t(1); self_reduce(); // this method in particular enforces limb vals are <2^b - needed for logic described above uint256_t value = get_value().lo; @@ -1940,25 +2021,32 @@ template void bigfield::assert_less_t r1 = r1.normalize(); r2 = r2.normalize(); r3 = r3.normalize(); - if constexpr (Builder::CIRCUIT_TYPE == CircuitType::ULTRA) { - context->decompose_into_default_range(r0.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r1.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r2.witness_index, static_cast(NUM_LIMB_BITS)); - context->decompose_into_default_range(r3.witness_index, static_cast(NUM_LIMB_BITS)); + if constexpr (HasPlookup) { + context->decompose_into_default_range(r0.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS)); + context->decompose_into_default_range(r1.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS)); + context->decompose_into_default_range(r2.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS)); + context->decompose_into_default_range(r3.get_normalized_witness_index(), static_cast(NUM_LIMB_BITS)); } else { - context->decompose_into_base4_accumulators( - r0.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_less_than range constraint 1."); - 
context->decompose_into_base4_accumulators( - r1.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_less_than range constraint 2."); - context->decompose_into_base4_accumulators( - r2.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_less_than range constraint 3."); - context->decompose_into_base4_accumulators( - r3.witness_index, static_cast(NUM_LIMB_BITS), "bigfield: assert_less_than range constraint 4."); + context->decompose_into_base4_accumulators(r0.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + "bigfield: assert_less_than range constraint 1."); + context->decompose_into_base4_accumulators(r1.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + "bigfield: assert_less_than range constraint 2."); + context->decompose_into_base4_accumulators(r2.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + "bigfield: assert_less_than range constraint 3."); + context->decompose_into_base4_accumulators(r3.get_normalized_witness_index(), + static_cast(NUM_LIMB_BITS), + "bigfield: assert_less_than range constraint 4."); } } // check elements are equal mod p by proving their integer difference is a multiple of p. // This relies on the minus operator for a-b increasing a by a multiple of p large enough so diff is non-negative +// When one of the elements is a constant and another is a witness we check equality of limbs, so if the witness +// bigfield element is in an unreduced form, it needs to be reduced first. We don't have automatice reduced form +// detection for now, so it is up to the circuit writer to detect this template void bigfield::assert_equal(const bigfield& other) const { Builder* ctx = this->context ? this->context : other.context; @@ -1970,6 +2058,7 @@ template void bigfield::assert_equal( if (is_constant() && other.is_constant()) { std::cerr << "bigfield: calling assert equal on 2 CONSTANT bigfield elements...is this intended?" << std::endl; + ASSERT(get_value() == other.get_value()); // We expect constants to be less than the target modulus return; } else if (other.is_constant()) { // TODO(https://github.com/AztecProtocol/barretenberg/issues/998): Something is fishy here @@ -1990,28 +2079,6 @@ template void bigfield::assert_equal( other.assert_equal(*this); return; } else { - if (is_constant() && other.is_constant()) { - std::cerr << "bigfield: calling assert equal on 2 CONSTANT bigfield elements...is this intended?" - << std::endl; - return; - } else if (other.is_constant()) { - // evaluate a strict equality - make sure *this is reduced first, or an honest prover - // might not be able to satisfy these constraints. 
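The new comment above warns that assert_equal against a constant compares limb by limb, so an unreduced witness representing the same residue (for example the value shifted by one multiple of p) has different limbs and would fail, which is why the circuit writer must reduce first. A toy illustration with a small modulus and 16-bit limbs (to_limbs is an invented helper, not the class API):

#include <cassert>
#include <cstdint>

constexpr uint64_t P = 65521;      // toy modulus
constexpr unsigned LIMB_BITS = 16; // toy limb width

// Split a value into 4 binary-basis limbs.
void to_limbs(uint64_t value, uint64_t limbs[4])
{
    for (unsigned i = 0; i < 4; ++i) {
        limbs[i] = (value >> (LIMB_BITS * i)) & 0xffff;
    }
}

int main()
{
    uint64_t x = 12345;
    uint64_t x_unreduced = x + P; // same residue mod P, different integer

    uint64_t limbs_a[4];
    uint64_t limbs_b[4];
    to_limbs(x, limbs_a);
    to_limbs(x_unreduced, limbs_b);

    // Same value mod P...
    assert(x % P == x_unreduced % P);

    // ...but a limb-wise comparison (what assert_equal does against a constant)
    // sees different limbs, so the unreduced witness must be reduced first.
    bool limbs_match = true;
    for (unsigned i = 0; i < 4; ++i) {
        limbs_match = limbs_match && (limbs_a[i] == limbs_b[i]);
    }
    return limbs_match ? 1 : 0; // the limbs differ here, so this returns 0
}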
- field_t t0 = (binary_basis_limbs[0].element - other.binary_basis_limbs[0].element); - field_t t1 = (binary_basis_limbs[1].element - other.binary_basis_limbs[1].element); - field_t t2 = (binary_basis_limbs[2].element - other.binary_basis_limbs[2].element); - field_t t3 = (binary_basis_limbs[3].element - other.binary_basis_limbs[3].element); - field_t t4 = (prime_basis_limb - other.prime_basis_limb); - t0.assert_is_zero(); - t1.assert_is_zero(); - t2.assert_is_zero(); - t3.assert_is_zero(); - t4.assert_is_zero(); - return; - } else if (is_constant()) { - other.assert_equal(*this); - return; - } bigfield diff = *this - other; const uint512_t diff_val = diff.get_value(); @@ -2047,7 +2114,7 @@ template void bigfield::assert_is_not const auto get_overload_count = [target_modulus = modulus_u512](const uint512_t& maximum_value) { uint512_t target = target_modulus; size_t overload_count = 0; - while (target < maximum_value) { + while (target <= maximum_value) { ++overload_count; target += target_modulus; } @@ -2101,13 +2168,15 @@ template void bigfield::self_reduce() if ((maximum_quotient_bits & 1ULL) == 1ULL) { ++maximum_quotient_bits; } - // TODO: implicit assumption here - NUM_LIMB_BITS large enough for all the quotient + + ASSERT(maximum_quotient_bits <= NUM_LIMB_BITS); uint32_t quotient_limb_index = context->add_variable(bb::fr(quotient_value.lo)); field_t quotient_limb = field_t::from_witness_index(context, quotient_limb_index); if constexpr (HasPlookup) { - context->decompose_into_default_range(quotient_limb.witness_index, static_cast(maximum_quotient_bits)); + context->decompose_into_default_range(quotient_limb.get_normalized_witness_index(), + static_cast(maximum_quotient_bits)); } else { - context->decompose_into_base4_accumulators(quotient_limb.witness_index, + context->decompose_into_base4_accumulators(quotient_limb.get_normalized_witness_index(), static_cast(maximum_quotient_bits), "bigfield: quotient_limb too large."); } @@ -2154,7 +2223,21 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le const std::vector& input_remainders) { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(input_remainders.size() <= MAXIMUM_SUMMAND_COUNT); + // Sanity checks + input_left.sanity_check(); + input_to_mul.sanity_check(); + input_quotient.sanity_check(); + for (auto& el : to_add) { + el.sanity_check(); + } + for (auto& el : input_remainders) { + el.sanity_check(); + } + std::vector remainders(input_remainders); + bigfield left = input_left; bigfield to_mul = input_to_mul; bigfield quotient = input_quotient; @@ -2183,7 +2266,19 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le uint512_t max_r0 = left.binary_basis_limbs[0].maximum_value * to_mul.binary_basis_limbs[0].maximum_value; max_r0 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[0].maximum_value); - const uint512_t max_r1 = max_b0 + max_b1; + uint512_t max_r1 = max_b0 + max_b1; + + uint256_t borrow_lo_value = 0; + for (const auto& remainder : input_remainders) { + max_r0 += remainder.binary_basis_limbs[0].maximum_value; + max_r1 += remainder.binary_basis_limbs[1].maximum_value; + + borrow_lo_value += (remainder.binary_basis_limbs[0].maximum_value + + (remainder.binary_basis_limbs[1].maximum_value << NUM_LIMB_BITS)); + } + borrow_lo_value >>= 2 * NUM_LIMB_BITS; + field_t borrow_lo(ctx, bb::fr(borrow_lo_value)); + const uint512_t max_r2 = max_c0 + max_c1 + max_c2; const uint512_t max_r3 = max_d0 + max_d1 + max_d2 + max_d3; @@ -2196,7 +2291,8 @@ void 
bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le (to_add[i].binary_basis_limbs[3].maximum_value << NUM_LIMB_BITS); } const uint512_t max_lo = max_r0 + (max_r1 << NUM_LIMB_BITS) + max_a0; - const uint512_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1; + const uint512_t max_lo_carry = max_lo >> (2 * NUM_LIMB_BITS); + const uint512_t max_hi = max_r2 + (max_r3 << NUM_LIMB_BITS) + max_a1 + max_lo_carry; uint64_t max_lo_bits = (max_lo.get_msb() + 1); uint64_t max_hi_bits = max_hi.get_msb() + 1; @@ -2207,6 +2303,15 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le ++max_hi_bits; } + uint64_t carry_lo_msb = max_lo_bits - (2 * NUM_LIMB_BITS); + uint64_t carry_hi_msb = max_hi_bits - (2 * NUM_LIMB_BITS); + + if (max_lo_bits < (2 * NUM_LIMB_BITS)) { + carry_lo_msb = 0; + } + if (max_hi_bits < (2 * NUM_LIMB_BITS)) { + carry_hi_msb = 0; + } if constexpr (HasPlookup) { // The plookup custom bigfield gate requires inputs are witnesses. // If we're using constant values, instantiate them as circuit variables @@ -2278,38 +2383,34 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le bb::non_native_field_witnesses witnesses{ { - left.binary_basis_limbs[0].element.normalize().witness_index, - left.binary_basis_limbs[1].element.normalize().witness_index, - left.binary_basis_limbs[2].element.normalize().witness_index, - left.binary_basis_limbs[3].element.normalize().witness_index, - left.prime_basis_limb.witness_index, + left.binary_basis_limbs[0].element.get_normalized_witness_index(), + left.binary_basis_limbs[1].element.get_normalized_witness_index(), + left.binary_basis_limbs[2].element.get_normalized_witness_index(), + left.binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - to_mul.binary_basis_limbs[0].element.normalize().witness_index, - to_mul.binary_basis_limbs[1].element.normalize().witness_index, - to_mul.binary_basis_limbs[2].element.normalize().witness_index, - to_mul.binary_basis_limbs[3].element.normalize().witness_index, - to_mul.prime_basis_limb.witness_index, + to_mul.binary_basis_limbs[0].element.get_normalized_witness_index(), + to_mul.binary_basis_limbs[1].element.get_normalized_witness_index(), + to_mul.binary_basis_limbs[2].element.get_normalized_witness_index(), + to_mul.binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - quotient.binary_basis_limbs[0].element.normalize().witness_index, - quotient.binary_basis_limbs[1].element.normalize().witness_index, - quotient.binary_basis_limbs[2].element.normalize().witness_index, - quotient.binary_basis_limbs[3].element.normalize().witness_index, - quotient.prime_basis_limb.witness_index, + quotient.binary_basis_limbs[0].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[1].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[2].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - remainder_limbs[0].normalize().witness_index, - remainder_limbs[1].normalize().witness_index, - remainder_limbs[2].normalize().witness_index, - remainder_limbs[3].normalize().witness_index, - remainder_prime_limb.witness_index, + remainder_limbs[0].get_normalized_witness_index(), + remainder_limbs[1].get_normalized_witness_index(), + remainder_limbs[2].get_normalized_witness_index(), + remainder_limbs[3].get_normalized_witness_index(), }, { neg_modulus_limbs[0], neg_modulus_limbs[1], neg_modulus_limbs[2], neg_modulus_limbs[3] }, modulus, }; // N.B. 
this method also evaluates the prime field component of the non-native field mul - const auto [lo_idx, hi_idx] = ctx->evaluate_non_native_field_multiplication(witnesses, false); + const auto [lo_idx, hi_idx] = ctx->evaluate_non_native_field_multiplication(witnesses); bb::fr neg_prime = -bb::fr(uint256_t(target_basis.modulus)); field_t::evaluate_polynomial_identity(left.prime_basis_limb, @@ -2317,19 +2418,19 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le quotient.prime_basis_limb * neg_prime, -remainder_prime_limb); - field_t lo = field_t::from_witness_index(ctx, lo_idx); + field_t lo = field_t::from_witness_index(ctx, lo_idx) + borrow_lo; field_t hi = field_t::from_witness_index(ctx, hi_idx); - const uint64_t carry_lo_msb = max_lo_bits - (2 * NUM_LIMB_BITS); - const uint64_t carry_hi_msb = max_hi_bits - (2 * NUM_LIMB_BITS); // if both the hi and lo output limbs have less than 70 bits, we can use our custom // limb accumulation gate (accumulates 2 field elements, each composed of 5 14-bit limbs, in 3 gates) if (carry_lo_msb <= 70 && carry_hi_msb <= 70) { - ctx->range_constrain_two_limbs( - hi.witness_index, lo.witness_index, size_t(carry_lo_msb), size_t(carry_hi_msb)); + ctx->range_constrain_two_limbs(hi.get_normalized_witness_index(), + lo.get_normalized_witness_index(), + size_t(carry_hi_msb), + size_t(carry_lo_msb)); } else { - ctx->decompose_into_default_range(hi.normalize().witness_index, carry_hi_msb); - ctx->decompose_into_default_range(lo.normalize().witness_index, carry_lo_msb); + ctx->decompose_into_default_range(hi.get_normalized_witness_index(), carry_hi_msb); + ctx->decompose_into_default_range(lo.get_normalized_witness_index(), carry_lo_msb); } } else { const field_t b0 = left.binary_basis_limbs[1].element.madd( @@ -2351,9 +2452,9 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le const field_t d3 = left.binary_basis_limbs[0].element.madd( to_mul.binary_basis_limbs[3].element, quotient.binary_basis_limbs[0].element * neg_modulus_limbs[3]); - // We wish to show that left*right - quotient*remainder = 0 mod 2^t, we do this by collecting the limb products - // into two separate variables - carry_lo and carry_hi, which are still small enough not to wrap mod r - // Their first t/2 bits will equal, respectively, the first and second t/2 bits of the expresssion + // We wish to show that left*right - quotient*remainder = 0 mod 2^t, we do this by collecting the limb + // products into two separate variables - carry_lo and carry_hi, which are still small enough not to wrap + // mod r Their first t/2 bits will equal, respectively, the first and second t/2 bits of the expresssion // Thus it will suffice to check that each of them begins with t/2 zeroes. We do this by in fact assigning // to these variables those expressions divided by 2^{t/2}. 
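The comment above is the heart of the non-native multiplication check: show that left*right minus quotient*p minus the remainder vanishes both modulo 2^t (via the carry_lo / carry_hi decomposition and range constraints) and modulo the native field, which together with the tracked size bounds forces the integer identity. A deliberately tiny native sketch of that CRT-style argument with toy moduli and honest hints (no carries or gates, just the two residue checks and the bound they rely on):

#include <cassert>
#include <cstdint>

// Toy version of the CRT argument used for non-native multiplication: verify
// a*b = q*p + r by checking the identity modulo 2^T (the binary basis) and
// modulo a native modulus N, plus a size bound a*b, q*p + r < N * 2^T that the
// real code gets from maximum_value tracking and the carry range constraints.
int main()
{
    const uint64_t p = 65521;              // toy "non-native" modulus
    const uint64_t N = 4294967311ULL;      // toy "native" modulus (odd, coprime to 2^T)
    const uint64_t two_to_T = uint64_t(1) << 32;

    uint64_t a = 4000003;
    uint64_t b = 2999999;
    uint64_t q = (a * b) / p; // prover-supplied quotient hint
    uint64_t r = (a * b) % p; // prover-supplied remainder hint

    // The two residue checks the circuit enforces (binary basis and prime basis).
    assert((a * b) % two_to_T == (q * p + r) % two_to_T);
    assert((a * b) % N == (q * p + r) % N);

    // q and r were computed honestly above, so the integer identity holds; the
    // point of the two congruences plus the size bound is that a dishonest
    // prover could not satisfy them with any other (q, r).
    assert(a * b == q * p + r);
    return 0;
}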
Since we have bounds on their ranage that are // smaller than r, We can range check the divisions by the original range bounds divided by 2^{t/2} @@ -2379,6 +2480,7 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le } field_t t1 = carry_lo.add_two(-remainders[0].binary_basis_limbs[2].element, -(remainders[0].binary_basis_limbs[3].element * shift_1)); + carry_lo += borrow_lo; field_t carry_hi_0 = r2 * shift_right_2; field_t carry_hi_1 = r3 * (shift_1 * shift_right_2); field_t carry_hi_2 = t1 * shift_right_2; @@ -2416,15 +2518,12 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le field_t::evaluate_polynomial_identity( left.prime_basis_limb, to_mul.prime_basis_limb, quotient.prime_basis_limb * neg_prime, linear_terms); - const uint64_t carry_lo_msb = max_lo_bits - (2 * NUM_LIMB_BITS); - const uint64_t carry_hi_msb = max_hi_bits - (2 * NUM_LIMB_BITS); - const bb::fr carry_lo_shift(uint256_t(uint256_t(1) << carry_lo_msb)); if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); carry_combined = carry_combined.normalize(); const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, + carry_combined.get_normalized_witness_index(), static_cast(carry_lo_msb + carry_hi_msb), "bigfield: carry_combined too large in unsafe_evaluate_multiply_add."); field_t accumulator_midpoint = @@ -2433,10 +2532,10 @@ void bigfield::unsafe_evaluate_multiply_add(const bigfield& input_le } else { carry_lo = carry_lo.normalize(); carry_hi = carry_hi.normalize(); - ctx->decompose_into_base4_accumulators(carry_lo.witness_index, + ctx->decompose_into_base4_accumulators(carry_lo.get_normalized_witness_index(), static_cast(carry_lo_msb), "bigfield: carry_lo too large in unsafe_evaluate_multiply_add."); - ctx->decompose_into_base4_accumulators(carry_hi.witness_index, + ctx->decompose_into_base4_accumulators(carry_hi.get_normalized_witness_index(), static_cast(carry_hi_msb), "bigfield: carry_hi too large in unsafe_evaluate_multiply_add."); } @@ -2471,7 +2570,26 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect const bigfield& input_quotient, const std::vector& input_remainders) { + ASSERT(input_left.size() == input_right.size()); + ASSERT(input_left.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(input_remainders.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(input_left.size() == input_right.size() && input_left.size() < 1024); + // Sanity checks + for (auto& el : input_left) { + el.sanity_check(); + } + for (auto& el : input_right) { + el.sanity_check(); + } + for (auto& el : to_add) { + el.sanity_check(); + } + input_quotient.sanity_check(); + for (auto& el : input_remainders) { + el.sanity_check(); + } std::vector remainders(input_remainders); std::vector left(input_left); std::vector right(input_right); @@ -2527,7 +2645,19 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect // max_r3 = terms from 2^3t - 2^5t uint512_t max_r0 = (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[0].maximum_value); max_r0 += (neg_modulus_limbs_u256[0] * quotient.binary_basis_limbs[0].maximum_value); - const uint512_t max_r1 = max_b0 + max_b1; + uint512_t max_r1 = max_b0 + max_b1; + + uint256_t borrow_lo_value(0); + for (const auto& remainder : input_remainders) { + max_r0 += remainder.binary_basis_limbs[0].maximum_value; + max_r1 += remainder.binary_basis_limbs[1].maximum_value; + + borrow_lo_value += 
remainder.binary_basis_limbs[0].maximum_value + + (remainder.binary_basis_limbs[1].maximum_value << NUM_LIMB_BITS); + } + borrow_lo_value >>= 2 * NUM_LIMB_BITS; + field_t borrow_lo(ctx, bb::fr(borrow_lo_value)); + const uint512_t max_r2 = max_c0 + max_c1 + max_c2; const uint512_t max_r3 = max_d0 + max_d1 + max_d2 + max_d3; @@ -2554,6 +2684,8 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect max_hi += product_hi; } + const uint512_t max_lo_carry = max_lo >> (2 * NUM_LIMB_BITS); + max_hi += max_lo_carry; // Compute the maximum number of bits in `max_lo` and `max_hi` - this defines the range constraint values we // will need to apply to validate our product uint64_t max_lo_bits = (max_lo.get_msb() + 1); @@ -2618,32 +2750,28 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect if (i > 0) { bb::non_native_field_witnesses mul_witnesses = { { - left[i].binary_basis_limbs[0].element.normalize().witness_index, - left[i].binary_basis_limbs[1].element.normalize().witness_index, - left[i].binary_basis_limbs[2].element.normalize().witness_index, - left[i].binary_basis_limbs[3].element.normalize().witness_index, - left[i].prime_basis_limb.witness_index, + left[i].binary_basis_limbs[0].element.get_normalized_witness_index(), + left[i].binary_basis_limbs[1].element.get_normalized_witness_index(), + left[i].binary_basis_limbs[2].element.get_normalized_witness_index(), + left[i].binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - right[i].binary_basis_limbs[0].element.normalize().witness_index, - right[i].binary_basis_limbs[1].element.normalize().witness_index, - right[i].binary_basis_limbs[2].element.normalize().witness_index, - right[i].binary_basis_limbs[3].element.normalize().witness_index, - right[i].prime_basis_limb.witness_index, + right[i].binary_basis_limbs[0].element.get_normalized_witness_index(), + right[i].binary_basis_limbs[1].element.get_normalized_witness_index(), + right[i].binary_basis_limbs[2].element.get_normalized_witness_index(), + right[i].binary_basis_limbs[3].element.get_normalized_witness_index(), }, { ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, - ctx->zero_idx, }, { ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, ctx->zero_idx, - ctx->zero_idx, }, { 0, 0, 0, 0 }, modulus, @@ -2714,38 +2842,34 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect bb::non_native_field_witnesses witnesses{ { - left[0].binary_basis_limbs[0].element.normalize().witness_index, - left[0].binary_basis_limbs[1].element.normalize().witness_index, - left[0].binary_basis_limbs[2].element.normalize().witness_index, - left[0].binary_basis_limbs[3].element.normalize().witness_index, - left[0].prime_basis_limb.normalize().witness_index, + left[0].binary_basis_limbs[0].element.get_normalized_witness_index(), + left[0].binary_basis_limbs[1].element.get_normalized_witness_index(), + left[0].binary_basis_limbs[2].element.get_normalized_witness_index(), + left[0].binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - right[0].binary_basis_limbs[0].element.normalize().witness_index, - right[0].binary_basis_limbs[1].element.normalize().witness_index, - right[0].binary_basis_limbs[2].element.normalize().witness_index, - right[0].binary_basis_limbs[3].element.normalize().witness_index, - right[0].prime_basis_limb.normalize().witness_index, + right[0].binary_basis_limbs[0].element.get_normalized_witness_index(), + right[0].binary_basis_limbs[1].element.get_normalized_witness_index(), + 
right[0].binary_basis_limbs[2].element.get_normalized_witness_index(), + right[0].binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - quotient.binary_basis_limbs[0].element.normalize().witness_index, - quotient.binary_basis_limbs[1].element.normalize().witness_index, - quotient.binary_basis_limbs[2].element.normalize().witness_index, - quotient.binary_basis_limbs[3].element.normalize().witness_index, - quotient.prime_basis_limb.normalize().witness_index, + quotient.binary_basis_limbs[0].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[1].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[2].element.get_normalized_witness_index(), + quotient.binary_basis_limbs[3].element.get_normalized_witness_index(), }, { - remainder_limbs[0].normalize().witness_index, - remainder_limbs[1].normalize().witness_index, - remainder_limbs[2].normalize().witness_index, - remainder_limbs[3].normalize().witness_index, - remainder_prime_limb.normalize().witness_index, + remainder_limbs[0].get_normalized_witness_index(), + remainder_limbs[1].get_normalized_witness_index(), + remainder_limbs[2].get_normalized_witness_index(), + remainder_limbs[3].get_normalized_witness_index(), }, { neg_modulus_limbs[0], neg_modulus_limbs[1], neg_modulus_limbs[2], neg_modulus_limbs[3] }, modulus, }; - const auto [lo_1_idx, hi_1_idx] = ctx->evaluate_non_native_field_multiplication(witnesses, false); + const auto [lo_1_idx, hi_1_idx] = ctx->evaluate_non_native_field_multiplication(witnesses); bb::fr neg_prime = -bb::fr(uint256_t(target_basis.modulus)); @@ -2754,20 +2878,29 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect quotient.prime_basis_limb * neg_prime, -remainder_prime_limb); - field_t lo = field_t::from_witness_index(ctx, lo_1_idx); + field_t lo = field_t::from_witness_index(ctx, lo_1_idx) + borrow_lo; field_t hi = field_t::from_witness_index(ctx, hi_1_idx); - const uint64_t carry_lo_msb = max_lo_bits - (2 * NUM_LIMB_BITS); - const uint64_t carry_hi_msb = max_hi_bits - (2 * NUM_LIMB_BITS); + uint64_t carry_lo_msb = max_lo_bits - (2 * NUM_LIMB_BITS); + uint64_t carry_hi_msb = max_hi_bits - (2 * NUM_LIMB_BITS); + + if (max_lo_bits < (2 * NUM_LIMB_BITS)) { + carry_lo_msb = 0; + } + if (max_hi_bits < (2 * NUM_LIMB_BITS)) { + carry_hi_msb = 0; + } // if both the hi and lo output limbs have less than 70 bits, we can use our custom // limb accumulation gate (accumulates 2 field elements, each composed of 5 14-bit limbs, in 3 gates) if (carry_lo_msb <= 70 && carry_hi_msb <= 70) { - ctx->range_constrain_two_limbs( - hi.witness_index, lo.witness_index, (size_t)carry_lo_msb, (size_t)carry_hi_msb); + ctx->range_constrain_two_limbs(hi.get_normalized_witness_index(), + lo.get_normalized_witness_index(), + (size_t)carry_hi_msb, + (size_t)carry_lo_msb); } else { - ctx->decompose_into_default_range(hi.normalize().witness_index, carry_hi_msb); - ctx->decompose_into_default_range(lo.normalize().witness_index, carry_lo_msb); + ctx->decompose_into_default_range(hi.get_normalized_witness_index(), carry_hi_msb); + ctx->decompose_into_default_range(lo.get_normalized_witness_index(), carry_lo_msb); } /* NOTE TO AUDITOR: An extraneous block if constexpr (HasPlookup) { @@ -2913,6 +3046,7 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect field_t carry_lo = carry_lo_0.add_two(carry_lo_1, carry_lo_2); field_t t1 = carry_lo.add_two(-remainder_limbs[2], -(remainder_limbs[3] * shift_1)); + carry_lo += borrow_lo; field_t carry_hi_0 = r2 * shift_right_2; 
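// Aside on the carry bookkeeping used here: limb products that land below 2^{2t} are accumulated
// into a low accumulator, terms at or above 2^{2t} into a high one, and whatever spills out of the
// low accumulator past 2t bits has to be folded into the high accumulator before the range checks
// (this is what the max_lo_carry adjustment above models). A minimal standalone sketch of the same
// pattern, with NUM_LIMB_BITS shrunk to 8 so everything fits in uint64_t; toy values only, not the
// circuit types used in this function.
#include <cassert>
#include <cstdint>

int main()
{
    constexpr uint64_t NUM_LIMB_BITS = 8;
    constexpr uint64_t LIMB_MASK = (uint64_t(1) << NUM_LIMB_BITS) - 1;

    // a = a0 + a1 * 2^t, b = b0 + b1 * 2^t, with t = NUM_LIMB_BITS
    const uint64_t a = 0xBEEF, b = 0xCAFE;
    const uint64_t a0 = a & LIMB_MASK, a1 = a >> NUM_LIMB_BITS;
    const uint64_t b0 = b & LIMB_MASK, b1 = b >> NUM_LIMB_BITS;

    // Low accumulator: terms below 2^{2t}, i.e. a0*b0 + (a0*b1 + a1*b0) * 2^t
    const uint64_t lo = a0 * b0 + ((a0 * b1 + a1 * b0) << NUM_LIMB_BITS);
    // High accumulator: terms at or above 2^{2t}, i.e. a1*b1
    const uint64_t hi = a1 * b1;

    // The part of lo that spills past 2t bits is the carry that must be added into hi.
    const uint64_t carry_lo = lo >> (2 * NUM_LIMB_BITS);
    const uint64_t lo_masked = lo & ((uint64_t(1) << (2 * NUM_LIMB_BITS)) - 1);
    assert(lo_masked + ((hi + carry_lo) << (2 * NUM_LIMB_BITS)) == a * b);
    return 0;
}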
field_t carry_hi_1 = r3 * (shift_1 * shift_right_2); field_t carry_hi_2 = t1 * shift_right_2; @@ -2936,15 +3070,17 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect if constexpr (HasPlookup) { carry_lo = carry_lo.normalize(); carry_hi = carry_hi.normalize(); - ctx->decompose_into_default_range(carry_lo.witness_index, static_cast(carry_lo_msb)); - ctx->decompose_into_default_range(carry_hi.witness_index, static_cast(carry_hi_msb)); + ctx->decompose_into_default_range(carry_lo.get_normalized_witness_index(), + static_cast(carry_lo_msb)); + ctx->decompose_into_default_range(carry_hi.get_normalized_witness_index(), + static_cast(carry_hi_msb)); } else { if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); carry_combined = carry_combined.normalize(); const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, + carry_combined.get_normalized_witness_index(), static_cast(carry_lo_msb + carry_hi_msb), "bigfield: carry_combined too large in unsafe_evaluate_multiple_multiply_add."); field_t accumulator_midpoint = field_t::from_witness_index( @@ -2954,11 +3090,11 @@ void bigfield::unsafe_evaluate_multiple_multiply_add(const std::vect carry_lo = carry_lo.normalize(); carry_hi = carry_hi.normalize(); ctx->decompose_into_base4_accumulators( - carry_lo.witness_index, + carry_lo.get_normalized_witness_index(), static_cast(carry_lo_msb), "bigfield: carry_lo too large in unsafe_evaluate_multiple_multiply_add."); ctx->decompose_into_base4_accumulators( - carry_hi.witness_index, + carry_hi.get_normalized_witness_index(), static_cast(carry_hi_msb), "bigfield: carry_hi too large in unsafe_evaluate_multiple_multiply_add."); } @@ -2972,10 +3108,21 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, const bigfield& quotient, const bigfield& remainder) { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + if (HasPlookup) { unsafe_evaluate_multiply_add(left, left, to_add, quotient, { remainder }); return; } + + // Sanity checks + left.sanity_check(); + remainder.sanity_check(); + quotient.sanity_check(); + for (auto& el : to_add) { + el.sanity_check(); + } + Builder* ctx = left.context == nullptr ? 
quotient.context : left.context; uint512_t max_b0 = (left.binary_basis_limbs[1].maximum_value * left.binary_basis_limbs[0].maximum_value); @@ -3096,15 +3243,15 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, if constexpr (HasPlookup) { carry_lo = carry_lo.normalize(); carry_hi = carry_hi.normalize(); - ctx->decompose_into_default_range(carry_lo.witness_index, static_cast(carry_lo_msb)); - ctx->decompose_into_default_range(carry_hi.witness_index, static_cast(carry_hi_msb)); + ctx->decompose_into_default_range(carry_lo.get_normalized_witness_index(), static_cast(carry_lo_msb)); + ctx->decompose_into_default_range(carry_hi.get_normalized_witness_index(), static_cast(carry_hi_msb)); } else { if ((carry_hi_msb + carry_lo_msb) < field_t::modulus.get_msb()) { field_t carry_combined = carry_lo + (carry_hi * carry_lo_shift); carry_combined = carry_combined.normalize(); const auto accumulators = ctx->decompose_into_base4_accumulators( - carry_combined.witness_index, + carry_combined.get_normalized_witness_index(), static_cast(carry_lo_msb + carry_hi_msb), "bigfield: carry_combined too large in unsafe_evaluate_square_add."); field_t accumulator_midpoint = @@ -3113,10 +3260,10 @@ void bigfield::unsafe_evaluate_square_add(const bigfield& left, } else { carry_lo = carry_lo.normalize(); carry_hi = carry_hi.normalize(); - ctx->decompose_into_base4_accumulators(carry_lo.witness_index, + ctx->decompose_into_base4_accumulators(carry_lo.get_normalized_witness_index(), static_cast(carry_lo_msb), "bigfield: carry_lo too large in unsafe_evaluate_square_add."); - ctx->decompose_into_base4_accumulators(carry_hi.witness_index, + ctx->decompose_into_base4_accumulators(carry_hi.get_normalized_witness_index(), static_cast(carry_hi_msb), "bigfield: carry_hi too large in unsafe_evaluate_square_add"); } @@ -3127,6 +3274,8 @@ template std::pair bigfield::compute_quotient_remainder_values( const bigfield& a, const bigfield& b, const std::vector& to_add) { + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + uint512_t add_values(0); for (const auto& add_element : to_add) { add_element.reduction_check(); @@ -3149,6 +3298,8 @@ uint512_t bigfield::compute_maximum_quotient_value(const std::vector const std::vector& to_add) { ASSERT(as.size() == bs.size()); + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + uint512_t add_values(0); for (const auto& add_element : to_add) { add_values += add_element; @@ -3171,6 +3322,11 @@ std::pair bigfield::get_quotient_reduction_info(const const std::vector& remainders_max) { ASSERT(as_max.size() == bs_max.size()); + + ASSERT(to_add.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(as_max.size() <= MAXIMUM_SUMMAND_COUNT); + ASSERT(remainders_max.size() <= MAXIMUM_SUMMAND_COUNT); + // Check if the product sum can overflow CRT modulus if (mul_product_overflows_crt_modulus(as_max, bs_max, to_add)) { return std::pair(true, 0); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/goblin_field.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/goblin_field.hpp index 8e18a679ce4..c26305e7d4d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/goblin_field.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/goblin_field.hpp @@ -53,9 +53,13 @@ template class goblin_field { // N.B. 
this method is because AggregationState expects group element coordinates to be split into 4 slices // (we could update to only use 2 for Mega but that feels complex) - goblin_field(field_ct lolo, field_ct lohi, field_ct hilo, field_ct hihi, [[maybe_unused]] bool can_overflow = false) - : limbs{ lolo + lohi * (uint256_t(1) << NUM_LIMB_BITS), hilo + hihi * (uint256_t(1) << NUM_LIMB_BITS) } - {} + static goblin_field construct_from_limbs( + field_ct lolo, field_ct lohi, field_ct hilo, field_ct hihi, [[maybe_unused]] bool can_overflow = false) + { + goblin_field result; + result.limbs = { lolo + lohi * (uint256_t(1) << NUM_LIMB_BITS), hilo + hihi * (uint256_t(1) << NUM_LIMB_BITS) }; + return result; + } void assert_equal(const goblin_field& other) const { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_impl.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_impl.hpp index aa667bf2595..03872a3a945 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_impl.hpp @@ -93,7 +93,7 @@ element element::operator+(const element& other) con // If either inputs are points at infinity, we set lambda_denominator to be 1. This ensures we never trigger a // divide by zero error. // Note: if either inputs are points at infinity we will not use the result of this computation. - Fq safe_edgecase_denominator = Fq(field_t(1), field_t(0), field_t(0), field_t(0)); + Fq safe_edgecase_denominator = Fq(1); lambda_denominator = Fq::conditional_assign( lhs_infinity || rhs_infinity || infinity_predicate, safe_edgecase_denominator, lambda_denominator); const Fq lambda = Fq::div_without_denominator_check({ lambda_numerator }, lambda_denominator); @@ -163,7 +163,7 @@ element element::operator-(const element& other) con // If either inputs are points at infinity, we set lambda_denominator to be 1. This ensures we never trigger a // divide by zero error. 
// (if either inputs are points at infinity we will not use the result of this computation) - Fq safe_edgecase_denominator = Fq(field_t(1), field_t(0), field_t(0), field_t(0)); + Fq safe_edgecase_denominator = Fq(1); lambda_denominator = Fq::conditional_assign( lhs_infinity || rhs_infinity || infinity_predicate, safe_edgecase_denominator, lambda_denominator); const Fq lambda = Fq::div_without_denominator_check({ lambda_numerator }, lambda_denominator); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_tables.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_tables.hpp index 821779acbf6..40cfcf24abc 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_tables.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_tables.hpp @@ -74,8 +74,9 @@ element element::read_group_element_rom_tables( const auto yhi = tables[3][index]; const auto xyprime = tables[4][index]; - Fq x_fq(xlo[0], xlo[1], xhi[0], xhi[1], xyprime[0]); - Fq y_fq(ylo[0], ylo[1], yhi[0], yhi[1], xyprime[1]); + // We assign maximum_value of each limb here, so we can use the unsafe API from element construction + Fq x_fq = Fq::unsafe_construct_from_limbs(xlo[0], xlo[1], xhi[0], xhi[1], xyprime[0]); + Fq y_fq = Fq::unsafe_construct_from_limbs(ylo[0], ylo[1], yhi[0], yhi[1], xyprime[1]); x_fq.binary_basis_limbs[0].maximum_value = limb_max[0]; x_fq.binary_basis_limbs[1].maximum_value = limb_max[1]; x_fq.binary_basis_limbs[2].maximum_value = limb_max[2]; @@ -157,8 +158,10 @@ element element::eight_bit_fixed_base_table::oper const auto yhi = plookup_read::read_pair_from_table(tags[3], index); const auto xyprime = plookup_read::read_pair_from_table(tags[4], index); - Fq x = Fq(xlo.first, xlo.second, xhi.first, xhi.second, xyprime.first); - Fq y = Fq(ylo.first, ylo.second, yhi.first, yhi.second, xyprime.second); + // All the elements are precomputed constants so they are completely reduced, so the default maximum limb values are + // appropriate + Fq x = Fq::unsafe_construct_from_limbs(xlo.first, xlo.second, xhi.first, xhi.second, xyprime.first); + Fq y = Fq::unsafe_construct_from_limbs(ylo.first, ylo.second, yhi.first, yhi.second, xyprime.second); if (use_endomorphism) { y = -y; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.hpp index 686b9a6deb1..76d4c333700 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.hpp @@ -63,6 +63,8 @@ template class bool_t { bool_t normalize() const; + uint32_t get_normalized_witness_index() const { return normalize().witness_index; } + Builder* get_context() const { return context; } void set_origin_tag(const OriginTag& new_tag) const { tag = new_tag; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp index 6639158e411..6fa48ee7957 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/databus/databus.hpp @@ -249,7 +249,7 @@ template class DataBusDepot { l1.create_range_constraint(Fq::NUM_LIMB_BITS, "l1"); l2.create_range_constraint(Fq::NUM_LIMB_BITS, "l2"); l3.create_range_constraint(Fq::NUM_LAST_LIMB_BITS, "l3"); - return Fq(l0, l1, l2, l3, /*can_overflow=*/false); + return Fq::construct_from_limbs(l0, l1, l2, 
                                       l3, /*can_overflow=*/false);
     }
 
     void assert_equality_of_commitments(const Commitment& P0, const Commitment& P1)
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp
index 1081be69d34..51b4b726be5 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp
@@ -77,7 +77,15 @@ template <typename Builder> field_t<Builder>::operator bool_t<Builder>() const
     bool mul_constant_check = (multiplicative_constant == bb::fr::one());
     bool inverted_check = (additive_constant == bb::fr::one()) && (multiplicative_constant == bb::fr::neg_one());
     if ((!add_constant_check || !mul_constant_check) && !inverted_check) {
-        normalize();
+        auto normalized_element = normalize();
+        bb::fr witness = context->get_variable(normalized_element.get_witness_index());
+        ASSERT((witness == bb::fr::zero()) || (witness == bb::fr::one()));
+        bool_t<Builder> result(context);
+        result.witness_bool = (witness == bb::fr::one());
+        result.witness_inverted = false;
+        result.witness_index = normalized_element.get_witness_index();
+        context->create_bool_gate(normalized_element.get_witness_index());
+        return result;
     }
 
     bb::fr witness = context->get_variable(witness_index);
@@ -509,6 +517,18 @@ template <typename Builder> field_t<Builder> field_t<Builder>::add_two(const fie
     return result;
 }
 
+/**
+ * @brief Return a new element, where the in-circuit witness contains the actual represented value (multiplicative
+ * constant is 1 and additive_constant is 0)
+ *
+ * @details If the element is a constant or it is already normalized, just return the element itself
+ *
+ * @todo We need to add a mechanism into the circuit builders for caching normalized variants for fields and bigfields.
+ * It should make the circuits smaller. https://github.com/AztecProtocol/barretenberg/issues/1052
+ *
+ * @tparam Builder
+ * @return field_t
+ */
 template <typename Builder> field_t<Builder> field_t<Builder>::normalize() const
 {
     if (witness_index == IS_CONSTANT ||
@@ -923,6 +943,7 @@ void field_t<Builder>::evaluate_linear_identity(const field_t& a, const field_t&
     if (a.witness_index == IS_CONSTANT && b.witness_index == IS_CONSTANT && c.witness_index == IS_CONSTANT &&
         d.witness_index == IS_CONSTANT) {
+        ASSERT(a.get_value() + b.get_value() + c.get_value() + d.get_value() == 0);
         return;
     }
 
@@ -958,6 +979,7 @@ void field_t<Builder>::evaluate_polynomial_identity(const field_t& a,
     if (a.witness_index == IS_CONSTANT && b.witness_index == IS_CONSTANT && c.witness_index == IS_CONSTANT &&
         d.witness_index == IS_CONSTANT) {
+        ASSERT((a.get_value() * b.get_value() + c.get_value() + d.get_value()).is_zero());
         return;
     }
 
@@ -1187,7 +1209,6 @@ std::vector<bool_t<Builder>> field_t<Builder>::decompose_into_bits(
     // y_lo = (2**128 + p_lo) - sum_lo
     field_t y_lo = (-sum) + (p_lo + shift);
     y_lo += shifted_high_limb;
-    y_lo.normalize();
 
     if constexpr (IsSimulator) {
         fr sum_lo = shift + p_lo - y_lo.get_value();
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp
index 479c274ec57..77a1d4d8759 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp
@@ -256,7 +256,7 @@ template <typename Builder> class field_t {
      * Constants do not need to be normalized, as there is no underlying 'witness'; a constant's value is
      * wholly tracked by `this.additive_constant`, so we definitely don't want to set that to 0!
     **/
-    field_t normalize() const;
+    [[nodiscard]] field_t normalize() const;
 
     bb::fr get_value() const;
 
@@ -313,8 +313,26 @@ template <typename Builder> class field_t {
         context->fix_witness(witness_index, get_value());
     }
 
+    /**
+     * @brief Get the witness index of the current field element.
+     *
+     * @warning Are you sure you don't want to use
+     * get_normalized_witness_index?
+     *
+     * @return uint32_t
+     */
     uint32_t get_witness_index() const { return witness_index; }
 
+    /**
+     * @brief Get the index of a normalized version of this element
+     *
+     * @details Most of the time when using field elements in other parts of stdlib we want to use this API instead of
+     * get_witness_index. The reason is that it will prevent some soundness vulnerabilities.
+     *
+     * @return uint32_t
+     */
+    uint32_t get_normalized_witness_index() const { return normalize().witness_index; }
+
     std::vector<bool_t<Builder>> decompose_into_bits(
         size_t num_bits = 256,
         std::function<bool_t<Builder>(Builder* ctx, uint64_t, uint256_t)> get_bit =
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp
index 8da6527284c..546e79012ec 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp
@@ -107,6 +107,17 @@ template <typename Builder> class stdlib_field : public testing::Test {
         run_test(-1, fr::modulus.get_msb() + 1, true);
     }
 
+    /**
+     * @brief Test that bool is converted correctly
+     *
+     */
+    static void test_bool_conversion_regression()
+    {
+        Builder builder = Builder();
+        field_ct one = field_ct(witness_ct(&builder, 1));
+        bool_ct b_false = bool_ct(one * field_ct(0));
+        EXPECT_FALSE(b_false.get_value());
+    }
     /**
      * @brief Demonstrate current behavior of assert_equal.
*/ @@ -1099,6 +1110,11 @@ TYPED_TEST(stdlib_field, test_assert_equal) TestFixture::test_assert_equal(); } +TYPED_TEST(stdlib_field, test_bool_conversion_regression) +{ + TestFixture::test_bool_conversion_regression(); +} + TYPED_TEST(stdlib_field, test_div) { TestFixture::test_div(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp index 9c36c59a809..a76a95a9a1d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/plookup/plookup.test.cpp @@ -555,8 +555,8 @@ TEST(stdlib_plookup, secp256k1_generator) const auto xhi = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_XHI, index); const auto ylo = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YLO, index); const auto yhi = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YHI, index); - curve::fq_ct x = curve::fq_ct(xlo.first, xlo.second, xhi.first, xhi.second); - curve::fq_ct y = curve::fq_ct(ylo.first, ylo.second, yhi.first, yhi.second); + curve::fq_ct x = curve::fq_ct::unsafe_construct_from_limbs(xlo.first, xlo.second, xhi.first, xhi.second); + curve::fq_ct y = curve::fq_ct::unsafe_construct_from_limbs(ylo.first, ylo.second, yhi.first, yhi.second); const auto res = curve::g1_ct(x, y).get_value(); curve::fr scalar(i); @@ -573,8 +573,8 @@ TEST(stdlib_plookup, secp256k1_generator) const auto ylo = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YLO, circuit_naf_values[0]); const auto yhi = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YHI, circuit_naf_values[0]); - curve::fq_ct x = curve::fq_ct(xlo.first, xlo.second, xhi.first, xhi.second); - curve::fq_ct y = curve::fq_ct(ylo.first, ylo.second, yhi.first, yhi.second); + curve::fq_ct x = curve::fq_ct::unsafe_construct_from_limbs(xlo.first, xlo.second, xhi.first, xhi.second); + curve::fq_ct y = curve::fq_ct::unsafe_construct_from_limbs(ylo.first, ylo.second, yhi.first, yhi.second); accumulator = curve::g1_ct(x, y); } for (size_t i = 1; i < circuit_naf_values.size(); ++i) { @@ -590,8 +590,8 @@ TEST(stdlib_plookup, secp256k1_generator) const auto xhi = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_XHI, circuit_naf_values[i]); const auto ylo = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YLO, circuit_naf_values[i]); const auto yhi = plookup_read::read_pair_from_table(MultiTableId::SECP256K1_YHI, circuit_naf_values[i]); - curve::fq_ct x = curve::fq_ct(xlo.first, xlo.second, xhi.first, xhi.second); - curve::fq_ct y = curve::fq_ct(ylo.first, ylo.second, yhi.first, yhi.second); + curve::fq_ct x = curve::fq_ct::unsafe_construct_from_limbs(xlo.first, xlo.second, xhi.first, xhi.second); + curve::fq_ct y = curve::fq_ct::unsafe_construct_from_limbs(ylo.first, ylo.second, yhi.first, yhi.second); accumulator = accumulator.montgomery_ladder(curve::g1_ct(x, y)); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp index a94fe57d157..a4a4befd848 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_verifier.cpp @@ -137,7 +137,8 @@ bool TranslatorRecursiveVerifier_::verify_translation( typename Flavor::FF>& 
translation_evaluations) { const auto reconstruct_from_array = [&](const auto& arr) { - const BF reconstructed = BF(arr[0], arr[1], arr[2], arr[3]); + const BF reconstructed = BF::construct_from_limbs(arr[0], arr[1], arr[2], arr[3]); + return reconstructed; }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp index 180908807e5..9409d1c119c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.cpp @@ -1669,7 +1669,7 @@ std::array UltraCircuitBuilder_::decompose_non_nat **/ template std::array UltraCircuitBuilder_::evaluate_non_native_field_multiplication( - const non_native_field_witnesses& input, const bool range_constrain_quotient_and_remainder) + const non_native_field_witnesses& input) { std::array a{ @@ -1697,8 +1697,6 @@ std::array UltraCircuitBuilder_::evaluate_non_nati this->get_variable(input.r[3]), }; constexpr FF LIMB_SHIFT = uint256_t(1) << DEFAULT_NON_NATIVE_FIELD_LIMB_BITS; - constexpr FF LIMB_SHIFT_2 = uint256_t(1) << (2 * DEFAULT_NON_NATIVE_FIELD_LIMB_BITS); - constexpr FF LIMB_SHIFT_3 = uint256_t(1) << (3 * DEFAULT_NON_NATIVE_FIELD_LIMB_BITS); constexpr FF LIMB_RSHIFT = FF(1) / FF(uint256_t(1) << DEFAULT_NON_NATIVE_FIELD_LIMB_BITS); constexpr FF LIMB_RSHIFT_2 = FF(1) / FF(uint256_t(1) << (2 * DEFAULT_NON_NATIVE_FIELD_LIMB_BITS)); @@ -1722,35 +1720,6 @@ std::array UltraCircuitBuilder_::evaluate_non_nati const uint32_t hi_2_idx = this->add_variable(hi_2); const uint32_t hi_3_idx = this->add_variable(hi_3); - // Sometimes we have already applied range constraints on the quotient and remainder - if (range_constrain_quotient_and_remainder) { - // /** - // * r_prime - r_0 - r_1 * 2^b - r_2 * 2^2b - r_3 * 2^3b = 0 - // * - // * w_4_omega - w_4 + w_1(2^b) + w_2(2^2b) + w_3(2^3b) = 0 - // * - // **/ - create_big_add_gate( - { input.r[1], input.r[2], input.r[3], input.r[4], LIMB_SHIFT, LIMB_SHIFT_2, LIMB_SHIFT_3, -1, 0 }, true); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/879): dummy necessary for preceeding big add gate - create_dummy_gate(blocks.arithmetic, this->zero_idx, this->zero_idx, this->zero_idx, input.r[0]); - range_constrain_two_limbs(input.r[0], input.r[1]); - range_constrain_two_limbs(input.r[2], input.r[3]); - - // /** - // * q_prime - q_0 - q_1 * 2^b - q_2 * 2^2b - q_3 * 2^3b = 0 - // * - // * w_4_omega - w_4 + w_1(2^b) + w_2(2^2b) + w_3(2^3b) = 0 - // * - // **/ - create_big_add_gate( - { input.q[1], input.q[2], input.q[3], input.q[4], LIMB_SHIFT, LIMB_SHIFT_2, LIMB_SHIFT_3, -1, 0 }, true); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/879): dummy necessary for preceeding big add gate - create_dummy_gate(blocks.arithmetic, this->zero_idx, this->zero_idx, this->zero_idx, input.q[0]); - range_constrain_two_limbs(input.q[0], input.q[1]); - range_constrain_two_limbs(input.q[2], input.q[3]); - } - // TODO(https://github.com/AztecProtocol/barretenberg/issues/879): Originally this was a single arithmetic gate. // With trace sorting, we must add a dummy gate since the add gate would otherwise try to read into an aux gate that // has been sorted out of sequence. 
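The gates deleted above enforced the limb recombination identity r_prime = r_0 + r_1*2^b + r_2*2^2b + r_3*2^3b (and its quotient counterpart) inside evaluate_non_native_field_multiplication; after this change that constraint is presumably left to the bigfield callers, which already constrain the quotient and remainder limbs before queuing the multiplication. A standalone sketch of the identity itself, with a toy limb width of b = 16 bits so the whole value fits in a uint64_t (illustrative only, not builder code):

#include <cassert>
#include <cstdint>

int main()
{
    constexpr unsigned b = 16; // stand-in for DEFAULT_NON_NATIVE_FIELD_LIMB_BITS
    const uint64_t r_prime = 0x0123'4567'89AB'CDEF;

    // Slice r_prime into four b-bit limbs, low to high.
    const uint64_t r0 = (r_prime >> (0 * b)) & 0xFFFF;
    const uint64_t r1 = (r_prime >> (1 * b)) & 0xFFFF;
    const uint64_t r2 = (r_prime >> (2 * b)) & 0xFFFF;
    const uint64_t r3 = (r_prime >> (3 * b)) & 0xFFFF;

    // Analogues of LIMB_SHIFT, LIMB_SHIFT_2, LIMB_SHIFT_3.
    const uint64_t shift_1 = uint64_t(1) << b;
    const uint64_t shift_2 = uint64_t(1) << (2 * b);
    const uint64_t shift_3 = uint64_t(1) << (3 * b);

    // r_prime - r_0 - r_1*2^b - r_2*2^2b - r_3*2^3b == 0
    assert(r_prime - r0 - r1 * shift_1 - r2 * shift_2 - r3 * shift_3 == 0);
    return 0;
}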
@@ -1834,12 +1803,12 @@ void UltraCircuitBuilder_::process_non_native_field_multiplicat { for (size_t i = 0; i < cached_partial_non_native_field_multiplications.size(); ++i) { auto& c = cached_partial_non_native_field_multiplications[i]; - for (size_t j = 0; j < 5; ++j) { + for (size_t j = 0; j < c.a.size(); ++j) { c.a[j] = this->real_variable_index[c.a[j]]; c.b[j] = this->real_variable_index[c.b[j]]; } } - cached_partial_non_native_field_multiplication::deduplicate(cached_partial_non_native_field_multiplications); + cached_partial_non_native_field_multiplication::deduplicate(cached_partial_non_native_field_multiplications, this); // iterate over the cached items and create constraints for (const auto& input : cached_partial_non_native_field_multiplications) { diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp index cdf460d035f..821339329fc 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp @@ -21,12 +21,11 @@ namespace bb { template struct non_native_field_witnesses { // first 4 array elements = limbs - // 5th element = prime basis limb - std::array a; - std::array b; - std::array q; - std::array r; - std::array neg_modulus; + std::array a; + std::array b; + std::array q; + std::array r; + std::array neg_modulus; FF modulus; }; @@ -196,31 +195,48 @@ class UltraCircuitBuilder_ : public CircuitBuilderBase a; - std::array b; - FF lo_0; - FF hi_0; - FF hi_1; + std::array a; + std::array b; + uint32_t lo_0; + uint32_t hi_0; + uint32_t hi_1; bool operator==(const cached_partial_non_native_field_multiplication& other) const { bool valid = true; - for (size_t i = 0; i < 5; ++i) { + for (size_t i = 0; i < 4; ++i) { valid = valid && (a[i] == other.a[i]); valid = valid && (b[i] == other.b[i]); } return valid; } - static void deduplicate(std::vector& vec) + /** + * @brief Dedupilcate cache entries which represent multiplication of the same witnesses + * + * @details While a and b witness vectors are the same, lo_0, hi_0 and hi_1 can vary, so we have to connect them + * or there is a vulnerability + * + * @param vec + * @param circuit_builder + */ + static void deduplicate(std::vector& vec, + UltraCircuitBuilder_* circuit_builder) { std::unordered_set> seen; std::vector uniqueVec; for (const auto& item : vec) { - if (seen.insert(item).second) { + auto [existing_element, not_in_set] = seen.insert(item); + // Memorize if not in set yet + if (not_in_set) { uniqueVec.push_back(item); + } else { + // If we already have a representative, we need to connect the outputs together + circuit_builder->assert_equal(item.lo_0, (*existing_element).lo_0); + circuit_builder->assert_equal(item.hi_0, (*existing_element).hi_0); + circuit_builder->assert_equal(item.hi_1, (*existing_element).hi_1); } } @@ -801,8 +817,7 @@ class UltraCircuitBuilder_ : public CircuitBuilderBase decompose_non_native_field_double_width_limb( const uint32_t limb_idx, const size_t num_limb_bits = (2 * DEFAULT_NON_NATIVE_FIELD_LIMB_BITS)); - std::array evaluate_non_native_field_multiplication( - const non_native_field_witnesses& input, const bool range_constrain_quotient_and_remainder = true); + std::array evaluate_non_native_field_multiplication(const non_native_field_witnesses& input); std::array queue_partial_non_native_field_multiplication(const non_native_field_witnesses& 
input); typedef std::pair scaled_witness; typedef std::tuple add_simple; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_honk.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_honk.test.cpp index 62dfe74c9c1..79f6406cbe6 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_honk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_honk.test.cpp @@ -756,22 +756,20 @@ TYPED_TEST(UltraHonkTests, non_native_field_multiplication) const auto split_into_limbs = [&](const uint512_t& input) { constexpr size_t NUM_BITS = 68; - std::array limbs; + std::array limbs; limbs[0] = input.slice(0, NUM_BITS).lo; limbs[1] = input.slice(NUM_BITS * 1, NUM_BITS * 2).lo; limbs[2] = input.slice(NUM_BITS * 2, NUM_BITS * 3).lo; limbs[3] = input.slice(NUM_BITS * 3, NUM_BITS * 4).lo; - limbs[4] = fr(input.lo); return limbs; }; - const auto get_limb_witness_indices = [&](const std::array& limbs) { - std::array limb_indices; + const auto get_limb_witness_indices = [&](const std::array& limbs) { + std::array limb_indices; limb_indices[0] = circuit_builder.add_variable(limbs[0]); limb_indices[1] = circuit_builder.add_variable(limbs[1]); limb_indices[2] = circuit_builder.add_variable(limbs[2]); limb_indices[3] = circuit_builder.add_variable(limbs[3]); - limb_indices[4] = circuit_builder.add_variable(limbs[4]); return limb_indices; }; const uint512_t BINARY_BASIS_MODULUS = uint512_t(1) << (68 * 4);
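The deduplication change above (connecting lo_0, hi_0 and hi_1 of a dropped duplicate to the surviving cache entry) is easiest to see with a toy model: two cached multiplications can share identical input witness indices while pointing at distinct output indices, and if the duplicate is silently discarded those outputs are left unconstrained unless they are tied to the representative's outputs. A self-contained sketch of that bookkeeping, using plain standard containers in place of the builder's cache and assert_equal; none of the names below are the real API.

#include <array>
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

struct CachedMul {
    std::array<uint32_t, 4> a; // input limb witness indices
    std::array<uint32_t, 4> b;
    uint32_t lo_0, hi_0, hi_1; // output witness indices
};

int main()
{
    const std::vector<CachedMul> cache = {
        { { 1, 2, 3, 4 }, { 5, 6, 7, 8 }, 20, 21, 22 },
        { { 1, 2, 3, 4 }, { 5, 6, 7, 8 }, 30, 31, 32 }, // same inputs, fresh output indices
    };

    std::map<std::pair<std::array<uint32_t, 4>, std::array<uint32_t, 4>>, CachedMul> seen;
    std::vector<std::pair<uint32_t, uint32_t>> copy_constraints; // stand-in for assert_equal edges
    std::vector<CachedMul> unique;

    for (const auto& entry : cache) {
        const auto [it, inserted] = seen.emplace(std::make_pair(entry.a, entry.b), entry);
        if (inserted) {
            unique.push_back(entry);
        } else {
            // Duplicate: keep only the representative, but connect the duplicate's outputs to it.
            copy_constraints.emplace_back(entry.lo_0, it->second.lo_0);
            copy_constraints.emplace_back(entry.hi_0, it->second.hi_0);
            copy_constraints.emplace_back(entry.hi_1, it->second.hi_1);
        }
    }

    // Expected: 1 unique entry and 3 copy constraints (20<->30, 21<->31, 22<->32).
    std::cout << unique.size() << " unique entries, " << copy_constraints.size() << " copy constraints\n";
    return 0;
}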